/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include "iw_cxgb4.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

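/*
 * The tunables above and below are ordinary module parameters, so they can
 * be set at load time, for example:
 *
 *	modprobe iw_cxgb4 mpa_rev=2 markers_enabled=1
 *
 * and the writable (0644) ones can also be inspected or changed at runtime
 * through /sys/module/iw_cxgb4/parameters/.
 */
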
static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		"1 is RFC 5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		" compliant (default=1)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p stopping\n", __func__, ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}

static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}

static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] -
		   sizeof(struct iphdr) - sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (GET_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;	/* allow for the 12-byte TCP timestamp option */
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
		     GET_TCPOPT_MSS(opt), ep->mss, ep->emss);
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
	     ep->mss, ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	if (test_bit(RELEASE_MAPINFO, &ep->com.flags)) {
		print_addr(&ep->com, __func__, "remove_mapinfo/mapping");
		iwpm_remove_mapinfo(&ep->com.local_addr,
				    &ep->com.mapped_local_addr);
		iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
	}
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
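 * A linear, uncloned skb passed in by the caller is trimmed and reused;
 * otherwise a fresh skb of the requested length is allocated.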
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	/* alloc_skb() can fail; don't touch skb->cb in that case */
	if (skb)
		t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}

static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}

static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
{
	int i;

	egress_dev = get_real_dev(egress_dev);
	for (i = 0; i < dev->rdev.lldi.nports; i++)
		if (dev->rdev.lldi.ports[i] == egress_dev)
			return 1;
	return 0;
}

static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
				     __u8 *peer_ip, __be16 local_port,
				     __be16 peer_port, u8 tos,
				     __u32 sin6_scope_id)
{
	struct dst_entry *dst = NULL;

	if (IS_ENABLED(CONFIG_IPV6)) {
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		memcpy(&fl6.daddr, peer_ip, 16);
		memcpy(&fl6.saddr, local_ip, 16);
		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
			fl6.flowi6_oif = sin6_scope_id;
		dst = ip6_route_output(&init_net, NULL, &fl6);
		if (!dst)
			goto out;
		if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
		    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
			dst_release(dst);
			dst = NULL;
		}
	}

out:
	return dst;
}

static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
				    __be32 peer_ip, __be16 local_port,
				    __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;
	struct neighbour *n;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	n = dst_neigh_lookup(&rt->dst, &peer_ip);
	if (!n) {
		/* drop the route reference taken above */
		dst_release(&rt->dst);
		return NULL;
	}
	if (!our_interface(dev, n->dev) &&
	    !(n->dev->flags & IFF_LOOPBACK)) {
		/* release the neighbour as well as the route */
		neigh_release(n);
		dst_release(&rt->dst);
		return NULL;
	}
	neigh_release(n);
	return &rt->dst;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
	connect_reply_upcall(ep, -EHOSTUNREACH);
	state_set(&ep->com, DEAD);
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
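 * The peer is unreachable at the L2 level, so there is no point trying to
 * put a RST on the wire; resending the request as CPL_ABORT_NO_RST still
 * lets the hardware tear down its own connection state.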
451 */ 452 static void abort_arp_failure(void *handle, struct sk_buff *skb) 453 { 454 struct c4iw_rdev *rdev = handle; 455 struct cpl_abort_req *req = cplhdr(skb); 456 457 PDBG("%s rdev %p\n", __func__, rdev); 458 req->cmd = CPL_ABORT_NO_RST; 459 c4iw_ofld_send(rdev, skb); 460 } 461 462 static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) 463 { 464 unsigned int flowclen = 80; 465 struct fw_flowc_wr *flowc; 466 int i; 467 468 skb = get_skb(skb, flowclen, GFP_KERNEL); 469 flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen); 470 471 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) | 472 FW_FLOWC_WR_NPARAMS(8)); 473 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen, 474 16)) | FW_WR_FLOWID(ep->hwtid)); 475 476 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 477 flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8); 478 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 479 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); 480 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 481 flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan); 482 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 483 flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid); 484 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; 485 flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq); 486 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; 487 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq); 488 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; 489 flowc->mnemval[6].val = cpu_to_be32(ep->snd_win); 490 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; 491 flowc->mnemval[7].val = cpu_to_be32(ep->emss); 492 /* Pad WR to 16 byte boundary */ 493 flowc->mnemval[8].mnemonic = 0; 494 flowc->mnemval[8].val = 0; 495 for (i = 0; i < 9; i++) { 496 flowc->mnemval[i].r4[0] = 0; 497 flowc->mnemval[i].r4[1] = 0; 498 flowc->mnemval[i].r4[2] = 0; 499 } 500 501 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 502 c4iw_ofld_send(&ep->com.dev->rdev, skb); 503 } 504 505 static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp) 506 { 507 struct cpl_close_con_req *req; 508 struct sk_buff *skb; 509 int wrlen = roundup(sizeof *req, 16); 510 511 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 512 skb = get_skb(NULL, wrlen, gfp); 513 if (!skb) { 514 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); 515 return -ENOMEM; 516 } 517 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 518 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 519 req = (struct cpl_close_con_req *) skb_put(skb, wrlen); 520 memset(req, 0, wrlen); 521 INIT_TP_WR(req, ep->hwtid); 522 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, 523 ep->hwtid)); 524 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 525 } 526 527 static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) 528 { 529 struct cpl_abort_req *req; 530 int wrlen = roundup(sizeof *req, 16); 531 532 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 533 skb = get_skb(skb, wrlen, gfp); 534 if (!skb) { 535 printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 536 __func__); 537 return -ENOMEM; 538 } 539 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 540 t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure); 541 req = (struct cpl_abort_req *) skb_put(skb, wrlen); 542 memset(req, 0, wrlen); 543 INIT_TP_WR(req, ep->hwtid); 544 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); 545 req->cmd = CPL_ABORT_SEND_RST; 546 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 547 } 548 549 /* 550 
* c4iw_form_pm_msg - Form a port mapper message with mapping info 551 */ 552 static void c4iw_form_pm_msg(struct c4iw_ep *ep, 553 struct iwpm_sa_data *pm_msg) 554 { 555 memcpy(&pm_msg->loc_addr, &ep->com.local_addr, 556 sizeof(ep->com.local_addr)); 557 memcpy(&pm_msg->rem_addr, &ep->com.remote_addr, 558 sizeof(ep->com.remote_addr)); 559 } 560 561 /* 562 * c4iw_form_reg_msg - Form a port mapper message with dev info 563 */ 564 static void c4iw_form_reg_msg(struct c4iw_dev *dev, 565 struct iwpm_dev_data *pm_msg) 566 { 567 memcpy(pm_msg->dev_name, dev->ibdev.name, IWPM_DEVNAME_SIZE); 568 memcpy(pm_msg->if_name, dev->rdev.lldi.ports[0]->name, 569 IWPM_IFNAME_SIZE); 570 } 571 572 static void c4iw_record_pm_msg(struct c4iw_ep *ep, 573 struct iwpm_sa_data *pm_msg) 574 { 575 memcpy(&ep->com.mapped_local_addr, &pm_msg->mapped_loc_addr, 576 sizeof(ep->com.mapped_local_addr)); 577 memcpy(&ep->com.mapped_remote_addr, &pm_msg->mapped_rem_addr, 578 sizeof(ep->com.mapped_remote_addr)); 579 } 580 581 static void best_mtu(const unsigned short *mtus, unsigned short mtu, 582 unsigned int *idx, int use_ts) 583 { 584 unsigned short hdr_size = sizeof(struct iphdr) + 585 sizeof(struct tcphdr) + 586 (use_ts ? 12 : 0); 587 unsigned short data_size = mtu - hdr_size; 588 589 cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx); 590 } 591 592 static int send_connect(struct c4iw_ep *ep) 593 { 594 struct cpl_act_open_req *req; 595 struct cpl_t5_act_open_req *t5_req; 596 struct cpl_act_open_req6 *req6; 597 struct cpl_t5_act_open_req6 *t5_req6; 598 struct sk_buff *skb; 599 u64 opt0; 600 u32 opt2; 601 unsigned int mtu_idx; 602 int wscale; 603 int wrlen; 604 int sizev4 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ? 605 sizeof(struct cpl_act_open_req) : 606 sizeof(struct cpl_t5_act_open_req); 607 int sizev6 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ? 608 sizeof(struct cpl_act_open_req6) : 609 sizeof(struct cpl_t5_act_open_req6); 610 struct sockaddr_in *la = (struct sockaddr_in *) 611 &ep->com.mapped_local_addr; 612 struct sockaddr_in *ra = (struct sockaddr_in *) 613 &ep->com.mapped_remote_addr; 614 struct sockaddr_in6 *la6 = (struct sockaddr_in6 *) 615 &ep->com.mapped_local_addr; 616 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *) 617 &ep->com.mapped_remote_addr; 618 int win; 619 620 wrlen = (ep->com.remote_addr.ss_family == AF_INET) ? 621 roundup(sizev4, 16) : 622 roundup(sizev6, 16); 623 624 PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid); 625 626 skb = get_skb(NULL, wrlen, GFP_KERNEL); 627 if (!skb) { 628 printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 629 __func__); 630 return -ENOMEM; 631 } 632 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 633 634 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, 635 enable_tcp_timestamps); 636 wscale = compute_wscale(rcv_win); 637 638 /* 639 * Specify the largest window that will fit in opt0. The 640 * remainder will be specified in the rx_data_ack. 641 */ 642 win = ep->rcv_win >> 10; 643 if (win > RCV_BUFSIZ_MASK) 644 win = RCV_BUFSIZ_MASK; 645 646 opt0 = (nocong ? 
NO_CONG(1) : 0) | 647 KEEP_ALIVE(1) | 648 DELACK(1) | 649 WND_SCALE(wscale) | 650 MSS_IDX(mtu_idx) | 651 L2T_IDX(ep->l2t->idx) | 652 TX_CHAN(ep->tx_chan) | 653 SMAC_SEL(ep->smac_idx) | 654 DSCP(ep->tos) | 655 ULP_MODE(ULP_MODE_TCPDDP) | 656 RCV_BUFSIZ(win); 657 opt2 = RX_CHANNEL(0) | 658 CCTRL_ECN(enable_ecn) | 659 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 660 if (enable_tcp_timestamps) 661 opt2 |= TSTAMPS_EN(1); 662 if (enable_tcp_sack) 663 opt2 |= SACK_EN(1); 664 if (wscale && enable_tcp_window_scaling) 665 opt2 |= WND_SCALE_EN(1); 666 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { 667 opt2 |= T5_OPT_2_VALID; 668 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); 669 } 670 t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure); 671 672 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { 673 if (ep->com.remote_addr.ss_family == AF_INET) { 674 req = (struct cpl_act_open_req *) skb_put(skb, wrlen); 675 INIT_TP_WR(req, 0); 676 OPCODE_TID(req) = cpu_to_be32( 677 MK_OPCODE_TID(CPL_ACT_OPEN_REQ, 678 ((ep->rss_qid << 14) | ep->atid))); 679 req->local_port = la->sin_port; 680 req->peer_port = ra->sin_port; 681 req->local_ip = la->sin_addr.s_addr; 682 req->peer_ip = ra->sin_addr.s_addr; 683 req->opt0 = cpu_to_be64(opt0); 684 req->params = cpu_to_be32(cxgb4_select_ntuple( 685 ep->com.dev->rdev.lldi.ports[0], 686 ep->l2t)); 687 req->opt2 = cpu_to_be32(opt2); 688 } else { 689 req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen); 690 691 INIT_TP_WR(req6, 0); 692 OPCODE_TID(req6) = cpu_to_be32( 693 MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, 694 ((ep->rss_qid<<14)|ep->atid))); 695 req6->local_port = la6->sin6_port; 696 req6->peer_port = ra6->sin6_port; 697 req6->local_ip_hi = *((__be64 *) 698 (la6->sin6_addr.s6_addr)); 699 req6->local_ip_lo = *((__be64 *) 700 (la6->sin6_addr.s6_addr + 8)); 701 req6->peer_ip_hi = *((__be64 *) 702 (ra6->sin6_addr.s6_addr)); 703 req6->peer_ip_lo = *((__be64 *) 704 (ra6->sin6_addr.s6_addr + 8)); 705 req6->opt0 = cpu_to_be64(opt0); 706 req6->params = cpu_to_be32(cxgb4_select_ntuple( 707 ep->com.dev->rdev.lldi.ports[0], 708 ep->l2t)); 709 req6->opt2 = cpu_to_be32(opt2); 710 } 711 } else { 712 u32 isn = (prandom_u32() & ~7UL) - 1; 713 714 opt2 |= T5_OPT_2_VALID; 715 opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */ 716 if (peer2peer) 717 isn += 4; 718 719 if (ep->com.remote_addr.ss_family == AF_INET) { 720 t5_req = (struct cpl_t5_act_open_req *) 721 skb_put(skb, wrlen); 722 INIT_TP_WR(t5_req, 0); 723 OPCODE_TID(t5_req) = cpu_to_be32( 724 MK_OPCODE_TID(CPL_ACT_OPEN_REQ, 725 ((ep->rss_qid << 14) | ep->atid))); 726 t5_req->local_port = la->sin_port; 727 t5_req->peer_port = ra->sin_port; 728 t5_req->local_ip = la->sin_addr.s_addr; 729 t5_req->peer_ip = ra->sin_addr.s_addr; 730 t5_req->opt0 = cpu_to_be64(opt0); 731 t5_req->params = cpu_to_be64(V_FILTER_TUPLE( 732 cxgb4_select_ntuple( 733 ep->com.dev->rdev.lldi.ports[0], 734 ep->l2t))); 735 t5_req->rsvd = cpu_to_be32(isn); 736 PDBG("%s snd_isn %u\n", __func__, 737 be32_to_cpu(t5_req->rsvd)); 738 t5_req->opt2 = cpu_to_be32(opt2); 739 } else { 740 t5_req6 = (struct cpl_t5_act_open_req6 *) 741 skb_put(skb, wrlen); 742 INIT_TP_WR(t5_req6, 0); 743 OPCODE_TID(t5_req6) = cpu_to_be32( 744 MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, 745 ((ep->rss_qid<<14)|ep->atid))); 746 t5_req6->local_port = la6->sin6_port; 747 t5_req6->peer_port = ra6->sin6_port; 748 t5_req6->local_ip_hi = *((__be64 *) 749 (la6->sin6_addr.s6_addr)); 750 t5_req6->local_ip_lo = *((__be64 *) 751 (la6->sin6_addr.s6_addr + 8)); 752 t5_req6->peer_ip_hi = *((__be64 *) 753 (ra6->sin6_addr.s6_addr)); 754 
t5_req6->peer_ip_lo = *((__be64 *) 755 (ra6->sin6_addr.s6_addr + 8)); 756 t5_req6->opt0 = cpu_to_be64(opt0); 757 t5_req6->params = (__force __be64)cpu_to_be32( 758 cxgb4_select_ntuple( 759 ep->com.dev->rdev.lldi.ports[0], 760 ep->l2t)); 761 t5_req6->rsvd = cpu_to_be32(isn); 762 PDBG("%s snd_isn %u\n", __func__, 763 be32_to_cpu(t5_req6->rsvd)); 764 t5_req6->opt2 = cpu_to_be32(opt2); 765 } 766 } 767 768 set_bit(ACT_OPEN_REQ, &ep->com.history); 769 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 770 } 771 772 static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, 773 u8 mpa_rev_to_use) 774 { 775 int mpalen, wrlen; 776 struct fw_ofld_tx_data_wr *req; 777 struct mpa_message *mpa; 778 struct mpa_v2_conn_params mpa_v2_params; 779 780 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 781 782 BUG_ON(skb_cloned(skb)); 783 784 mpalen = sizeof(*mpa) + ep->plen; 785 if (mpa_rev_to_use == 2) 786 mpalen += sizeof(struct mpa_v2_conn_params); 787 wrlen = roundup(mpalen + sizeof *req, 16); 788 skb = get_skb(skb, wrlen, GFP_KERNEL); 789 if (!skb) { 790 connect_reply_upcall(ep, -ENOMEM); 791 return; 792 } 793 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 794 795 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); 796 memset(req, 0, wrlen); 797 req->op_to_immdlen = cpu_to_be32( 798 FW_WR_OP(FW_OFLD_TX_DATA_WR) | 799 FW_WR_COMPL(1) | 800 FW_WR_IMMDLEN(mpalen)); 801 req->flowid_len16 = cpu_to_be32( 802 FW_WR_FLOWID(ep->hwtid) | 803 FW_WR_LEN16(wrlen >> 4)); 804 req->plen = cpu_to_be32(mpalen); 805 req->tunnel_to_proxy = cpu_to_be32( 806 FW_OFLD_TX_DATA_WR_FLUSH(1) | 807 FW_OFLD_TX_DATA_WR_SHOVE(1)); 808 809 mpa = (struct mpa_message *)(req + 1); 810 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); 811 mpa->flags = (crc_enabled ? MPA_CRC : 0) | 812 (markers_enabled ? MPA_MARKERS : 0) | 813 (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0); 814 mpa->private_data_size = htons(ep->plen); 815 mpa->revision = mpa_rev_to_use; 816 if (mpa_rev_to_use == 1) { 817 ep->tried_with_mpa_v1 = 1; 818 ep->retry_with_mpa_v1 = 0; 819 } 820 821 if (mpa_rev_to_use == 2) { 822 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 823 sizeof (struct mpa_v2_conn_params)); 824 mpa_v2_params.ird = htons((u16)ep->ird); 825 mpa_v2_params.ord = htons((u16)ep->ord); 826 827 if (peer2peer) { 828 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 829 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) 830 mpa_v2_params.ord |= 831 htons(MPA_V2_RDMA_WRITE_RTR); 832 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) 833 mpa_v2_params.ord |= 834 htons(MPA_V2_RDMA_READ_RTR); 835 } 836 memcpy(mpa->private_data, &mpa_v2_params, 837 sizeof(struct mpa_v2_conn_params)); 838 839 if (ep->plen) 840 memcpy(mpa->private_data + 841 sizeof(struct mpa_v2_conn_params), 842 ep->mpa_pkt + sizeof(*mpa), ep->plen); 843 } else 844 if (ep->plen) 845 memcpy(mpa->private_data, 846 ep->mpa_pkt + sizeof(*mpa), ep->plen); 847 848 /* 849 * Reference the mpa skb. This ensures the data area 850 * will remain in memory until the hw acks the tx. 851 * Function fw4_ack() will deref it. 
852 */ 853 skb_get(skb); 854 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 855 BUG_ON(ep->mpa_skb); 856 ep->mpa_skb = skb; 857 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 858 start_ep_timer(ep); 859 __state_set(&ep->com, MPA_REQ_SENT); 860 ep->mpa_attr.initiator = 1; 861 ep->snd_seq += mpalen; 862 return; 863 } 864 865 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) 866 { 867 int mpalen, wrlen; 868 struct fw_ofld_tx_data_wr *req; 869 struct mpa_message *mpa; 870 struct sk_buff *skb; 871 struct mpa_v2_conn_params mpa_v2_params; 872 873 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 874 875 mpalen = sizeof(*mpa) + plen; 876 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) 877 mpalen += sizeof(struct mpa_v2_conn_params); 878 wrlen = roundup(mpalen + sizeof *req, 16); 879 880 skb = get_skb(NULL, wrlen, GFP_KERNEL); 881 if (!skb) { 882 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); 883 return -ENOMEM; 884 } 885 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 886 887 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); 888 memset(req, 0, wrlen); 889 req->op_to_immdlen = cpu_to_be32( 890 FW_WR_OP(FW_OFLD_TX_DATA_WR) | 891 FW_WR_COMPL(1) | 892 FW_WR_IMMDLEN(mpalen)); 893 req->flowid_len16 = cpu_to_be32( 894 FW_WR_FLOWID(ep->hwtid) | 895 FW_WR_LEN16(wrlen >> 4)); 896 req->plen = cpu_to_be32(mpalen); 897 req->tunnel_to_proxy = cpu_to_be32( 898 FW_OFLD_TX_DATA_WR_FLUSH(1) | 899 FW_OFLD_TX_DATA_WR_SHOVE(1)); 900 901 mpa = (struct mpa_message *)(req + 1); 902 memset(mpa, 0, sizeof(*mpa)); 903 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 904 mpa->flags = MPA_REJECT; 905 mpa->revision = ep->mpa_attr.version; 906 mpa->private_data_size = htons(plen); 907 908 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 909 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 910 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 911 sizeof (struct mpa_v2_conn_params)); 912 mpa_v2_params.ird = htons(((u16)ep->ird) | 913 (peer2peer ? MPA_V2_PEER2PEER_MODEL : 914 0)); 915 mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ? 916 (p2p_type == 917 FW_RI_INIT_P2PTYPE_RDMA_WRITE ? 918 MPA_V2_RDMA_WRITE_RTR : p2p_type == 919 FW_RI_INIT_P2PTYPE_READ_REQ ? 920 MPA_V2_RDMA_READ_RTR : 0) : 0)); 921 memcpy(mpa->private_data, &mpa_v2_params, 922 sizeof(struct mpa_v2_conn_params)); 923 924 if (ep->plen) 925 memcpy(mpa->private_data + 926 sizeof(struct mpa_v2_conn_params), pdata, plen); 927 } else 928 if (plen) 929 memcpy(mpa->private_data, pdata, plen); 930 931 /* 932 * Reference the mpa skb again. This ensures the data area 933 * will remain in memory until the hw acks the tx. 934 * Function fw4_ack() will deref it. 
935 */ 936 skb_get(skb); 937 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 938 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 939 BUG_ON(ep->mpa_skb); 940 ep->mpa_skb = skb; 941 ep->snd_seq += mpalen; 942 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 943 } 944 945 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) 946 { 947 int mpalen, wrlen; 948 struct fw_ofld_tx_data_wr *req; 949 struct mpa_message *mpa; 950 struct sk_buff *skb; 951 struct mpa_v2_conn_params mpa_v2_params; 952 953 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 954 955 mpalen = sizeof(*mpa) + plen; 956 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) 957 mpalen += sizeof(struct mpa_v2_conn_params); 958 wrlen = roundup(mpalen + sizeof *req, 16); 959 960 skb = get_skb(NULL, wrlen, GFP_KERNEL); 961 if (!skb) { 962 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); 963 return -ENOMEM; 964 } 965 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 966 967 req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen); 968 memset(req, 0, wrlen); 969 req->op_to_immdlen = cpu_to_be32( 970 FW_WR_OP(FW_OFLD_TX_DATA_WR) | 971 FW_WR_COMPL(1) | 972 FW_WR_IMMDLEN(mpalen)); 973 req->flowid_len16 = cpu_to_be32( 974 FW_WR_FLOWID(ep->hwtid) | 975 FW_WR_LEN16(wrlen >> 4)); 976 req->plen = cpu_to_be32(mpalen); 977 req->tunnel_to_proxy = cpu_to_be32( 978 FW_OFLD_TX_DATA_WR_FLUSH(1) | 979 FW_OFLD_TX_DATA_WR_SHOVE(1)); 980 981 mpa = (struct mpa_message *)(req + 1); 982 memset(mpa, 0, sizeof(*mpa)); 983 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 984 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | 985 (markers_enabled ? MPA_MARKERS : 0); 986 mpa->revision = ep->mpa_attr.version; 987 mpa->private_data_size = htons(plen); 988 989 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 990 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 991 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 992 sizeof (struct mpa_v2_conn_params)); 993 mpa_v2_params.ird = htons((u16)ep->ird); 994 mpa_v2_params.ord = htons((u16)ep->ord); 995 if (peer2peer && (ep->mpa_attr.p2p_type != 996 FW_RI_INIT_P2PTYPE_DISABLED)) { 997 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 998 999 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) 1000 mpa_v2_params.ord |= 1001 htons(MPA_V2_RDMA_WRITE_RTR); 1002 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) 1003 mpa_v2_params.ord |= 1004 htons(MPA_V2_RDMA_READ_RTR); 1005 } 1006 1007 memcpy(mpa->private_data, &mpa_v2_params, 1008 sizeof(struct mpa_v2_conn_params)); 1009 1010 if (ep->plen) 1011 memcpy(mpa->private_data + 1012 sizeof(struct mpa_v2_conn_params), pdata, plen); 1013 } else 1014 if (plen) 1015 memcpy(mpa->private_data, pdata, plen); 1016 1017 /* 1018 * Reference the mpa skb. This ensures the data area 1019 * will remain in memory until the hw acks the tx. 1020 * Function fw4_ack() will deref it. 
1021 */ 1022 skb_get(skb); 1023 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 1024 ep->mpa_skb = skb; 1025 __state_set(&ep->com, MPA_REP_SENT); 1026 ep->snd_seq += mpalen; 1027 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1028 } 1029 1030 static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) 1031 { 1032 struct c4iw_ep *ep; 1033 struct cpl_act_establish *req = cplhdr(skb); 1034 unsigned int tid = GET_TID(req); 1035 unsigned int atid = GET_TID_TID(ntohl(req->tos_atid)); 1036 struct tid_info *t = dev->rdev.lldi.tids; 1037 1038 ep = lookup_atid(t, atid); 1039 1040 PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid, 1041 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); 1042 1043 mutex_lock(&ep->com.mutex); 1044 dst_confirm(ep->dst); 1045 1046 /* setup the hwtid for this connection */ 1047 ep->hwtid = tid; 1048 cxgb4_insert_tid(t, ep, tid); 1049 insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid); 1050 1051 ep->snd_seq = be32_to_cpu(req->snd_isn); 1052 ep->rcv_seq = be32_to_cpu(req->rcv_isn); 1053 1054 set_emss(ep, ntohs(req->tcp_opt)); 1055 1056 /* dealloc the atid */ 1057 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); 1058 cxgb4_free_atid(t, atid); 1059 set_bit(ACT_ESTAB, &ep->com.history); 1060 1061 /* start MPA negotiation */ 1062 send_flowc(ep, NULL); 1063 if (ep->retry_with_mpa_v1) 1064 send_mpa_req(ep, skb, 1); 1065 else 1066 send_mpa_req(ep, skb, mpa_rev); 1067 mutex_unlock(&ep->com.mutex); 1068 return 0; 1069 } 1070 1071 static void close_complete_upcall(struct c4iw_ep *ep, int status) 1072 { 1073 struct iw_cm_event event; 1074 1075 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1076 memset(&event, 0, sizeof(event)); 1077 event.event = IW_CM_EVENT_CLOSE; 1078 event.status = status; 1079 if (ep->com.cm_id) { 1080 PDBG("close complete delivered ep %p cm_id %p tid %u\n", 1081 ep, ep->com.cm_id, ep->hwtid); 1082 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1083 ep->com.cm_id->rem_ref(ep->com.cm_id); 1084 ep->com.cm_id = NULL; 1085 set_bit(CLOSE_UPCALL, &ep->com.history); 1086 } 1087 } 1088 1089 static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) 1090 { 1091 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1092 __state_set(&ep->com, ABORTING); 1093 set_bit(ABORT_CONN, &ep->com.history); 1094 return send_abort(ep, skb, gfp); 1095 } 1096 1097 static void peer_close_upcall(struct c4iw_ep *ep) 1098 { 1099 struct iw_cm_event event; 1100 1101 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1102 memset(&event, 0, sizeof(event)); 1103 event.event = IW_CM_EVENT_DISCONNECT; 1104 if (ep->com.cm_id) { 1105 PDBG("peer close delivered ep %p cm_id %p tid %u\n", 1106 ep, ep->com.cm_id, ep->hwtid); 1107 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1108 set_bit(DISCONN_UPCALL, &ep->com.history); 1109 } 1110 } 1111 1112 static void peer_abort_upcall(struct c4iw_ep *ep) 1113 { 1114 struct iw_cm_event event; 1115 1116 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1117 memset(&event, 0, sizeof(event)); 1118 event.event = IW_CM_EVENT_CLOSE; 1119 event.status = -ECONNRESET; 1120 if (ep->com.cm_id) { 1121 PDBG("abort delivered ep %p cm_id %p tid %u\n", ep, 1122 ep->com.cm_id, ep->hwtid); 1123 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1124 ep->com.cm_id->rem_ref(ep->com.cm_id); 1125 ep->com.cm_id = NULL; 1126 set_bit(ABORT_UPCALL, &ep->com.history); 1127 } 1128 } 1129 1130 static void connect_reply_upcall(struct c4iw_ep *ep, int status) 1131 { 1132 struct iw_cm_event event; 1133 
1134 PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status); 1135 memset(&event, 0, sizeof(event)); 1136 event.event = IW_CM_EVENT_CONNECT_REPLY; 1137 event.status = status; 1138 memcpy(&event.local_addr, &ep->com.local_addr, 1139 sizeof(ep->com.local_addr)); 1140 memcpy(&event.remote_addr, &ep->com.remote_addr, 1141 sizeof(ep->com.remote_addr)); 1142 1143 if ((status == 0) || (status == -ECONNREFUSED)) { 1144 if (!ep->tried_with_mpa_v1) { 1145 /* this means MPA_v2 is used */ 1146 event.private_data_len = ep->plen - 1147 sizeof(struct mpa_v2_conn_params); 1148 event.private_data = ep->mpa_pkt + 1149 sizeof(struct mpa_message) + 1150 sizeof(struct mpa_v2_conn_params); 1151 } else { 1152 /* this means MPA_v1 is used */ 1153 event.private_data_len = ep->plen; 1154 event.private_data = ep->mpa_pkt + 1155 sizeof(struct mpa_message); 1156 } 1157 } 1158 1159 PDBG("%s ep %p tid %u status %d\n", __func__, ep, 1160 ep->hwtid, status); 1161 set_bit(CONN_RPL_UPCALL, &ep->com.history); 1162 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1163 1164 if (status < 0) { 1165 ep->com.cm_id->rem_ref(ep->com.cm_id); 1166 ep->com.cm_id = NULL; 1167 } 1168 } 1169 1170 static int connect_request_upcall(struct c4iw_ep *ep) 1171 { 1172 struct iw_cm_event event; 1173 int ret; 1174 1175 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1176 memset(&event, 0, sizeof(event)); 1177 event.event = IW_CM_EVENT_CONNECT_REQUEST; 1178 memcpy(&event.local_addr, &ep->com.local_addr, 1179 sizeof(ep->com.local_addr)); 1180 memcpy(&event.remote_addr, &ep->com.remote_addr, 1181 sizeof(ep->com.remote_addr)); 1182 event.provider_data = ep; 1183 if (!ep->tried_with_mpa_v1) { 1184 /* this means MPA_v2 is used */ 1185 event.ord = ep->ord; 1186 event.ird = ep->ird; 1187 event.private_data_len = ep->plen - 1188 sizeof(struct mpa_v2_conn_params); 1189 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + 1190 sizeof(struct mpa_v2_conn_params); 1191 } else { 1192 /* this means MPA_v1 is used. 
Send max supported */ 1193 event.ord = c4iw_max_read_depth; 1194 event.ird = c4iw_max_read_depth; 1195 event.private_data_len = ep->plen; 1196 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); 1197 } 1198 c4iw_get_ep(&ep->com); 1199 ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id, 1200 &event); 1201 if (ret) 1202 c4iw_put_ep(&ep->com); 1203 set_bit(CONNREQ_UPCALL, &ep->com.history); 1204 c4iw_put_ep(&ep->parent_ep->com); 1205 return ret; 1206 } 1207 1208 static void established_upcall(struct c4iw_ep *ep) 1209 { 1210 struct iw_cm_event event; 1211 1212 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1213 memset(&event, 0, sizeof(event)); 1214 event.event = IW_CM_EVENT_ESTABLISHED; 1215 event.ird = ep->ird; 1216 event.ord = ep->ord; 1217 if (ep->com.cm_id) { 1218 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1219 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1220 set_bit(ESTAB_UPCALL, &ep->com.history); 1221 } 1222 } 1223 1224 static int update_rx_credits(struct c4iw_ep *ep, u32 credits) 1225 { 1226 struct cpl_rx_data_ack *req; 1227 struct sk_buff *skb; 1228 int wrlen = roundup(sizeof *req, 16); 1229 1230 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); 1231 skb = get_skb(NULL, wrlen, GFP_KERNEL); 1232 if (!skb) { 1233 printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n"); 1234 return 0; 1235 } 1236 1237 /* 1238 * If we couldn't specify the entire rcv window at connection setup 1239 * due to the limit in the number of bits in the RCV_BUFSIZ field, 1240 * then add the overage in to the credits returned. 1241 */ 1242 if (ep->rcv_win > RCV_BUFSIZ_MASK * 1024) 1243 credits += ep->rcv_win - RCV_BUFSIZ_MASK * 1024; 1244 1245 req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen); 1246 memset(req, 0, wrlen); 1247 INIT_TP_WR(req, ep->hwtid); 1248 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, 1249 ep->hwtid)); 1250 req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) | 1251 F_RX_DACK_CHANGE | 1252 V_RX_DACK_MODE(dack_mode)); 1253 set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx); 1254 c4iw_ofld_send(&ep->com.dev->rdev, skb); 1255 return credits; 1256 } 1257 1258 static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) 1259 { 1260 struct mpa_message *mpa; 1261 struct mpa_v2_conn_params *mpa_v2_params; 1262 u16 plen; 1263 u16 resp_ird, resp_ord; 1264 u8 rtr_mismatch = 0, insuff_ird = 0; 1265 struct c4iw_qp_attributes attrs; 1266 enum c4iw_qp_attr_mask mask; 1267 int err; 1268 int disconnect = 0; 1269 1270 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1271 1272 /* 1273 * Stop mpa timer. If it expired, then 1274 * we ignore the MPA reply. process_timeout() 1275 * will abort the connection. 1276 */ 1277 if (stop_ep_timer(ep)) 1278 return 0; 1279 1280 /* 1281 * If we get more than the supported amount of private data 1282 * then we must fail this connection. 1283 */ 1284 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { 1285 err = -EINVAL; 1286 goto err; 1287 } 1288 1289 /* 1290 * copy the new data into our accumulation buffer. 1291 */ 1292 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 1293 skb->len); 1294 ep->mpa_pkt_len += skb->len; 1295 1296 /* 1297 * if we don't even have the mpa message, then bail. 1298 */ 1299 if (ep->mpa_pkt_len < sizeof(*mpa)) 1300 return 0; 1301 mpa = (struct mpa_message *) ep->mpa_pkt; 1302 1303 /* Validate MPA header. */ 1304 if (mpa->revision > mpa_rev) { 1305 printk(KERN_ERR MOD "%s MPA version mismatch. 
Local = %d," 1306 " Received = %d\n", __func__, mpa_rev, mpa->revision); 1307 err = -EPROTO; 1308 goto err; 1309 } 1310 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { 1311 err = -EPROTO; 1312 goto err; 1313 } 1314 1315 plen = ntohs(mpa->private_data_size); 1316 1317 /* 1318 * Fail if there's too much private data. 1319 */ 1320 if (plen > MPA_MAX_PRIVATE_DATA) { 1321 err = -EPROTO; 1322 goto err; 1323 } 1324 1325 /* 1326 * If plen does not account for pkt size 1327 */ 1328 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 1329 err = -EPROTO; 1330 goto err; 1331 } 1332 1333 ep->plen = (u8) plen; 1334 1335 /* 1336 * If we don't have all the pdata yet, then bail. 1337 * We'll continue process when more data arrives. 1338 */ 1339 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 1340 return 0; 1341 1342 if (mpa->flags & MPA_REJECT) { 1343 err = -ECONNREFUSED; 1344 goto err; 1345 } 1346 1347 /* 1348 * If we get here we have accumulated the entire mpa 1349 * start reply message including private data. And 1350 * the MPA header is valid. 1351 */ 1352 __state_set(&ep->com, FPDU_MODE); 1353 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1354 ep->mpa_attr.recv_marker_enabled = markers_enabled; 1355 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1356 ep->mpa_attr.version = mpa->revision; 1357 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1358 1359 if (mpa->revision == 2) { 1360 ep->mpa_attr.enhanced_rdma_conn = 1361 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; 1362 if (ep->mpa_attr.enhanced_rdma_conn) { 1363 mpa_v2_params = (struct mpa_v2_conn_params *) 1364 (ep->mpa_pkt + sizeof(*mpa)); 1365 resp_ird = ntohs(mpa_v2_params->ird) & 1366 MPA_V2_IRD_ORD_MASK; 1367 resp_ord = ntohs(mpa_v2_params->ord) & 1368 MPA_V2_IRD_ORD_MASK; 1369 1370 /* 1371 * This is a double-check. Ideally, below checks are 1372 * not required since ird/ord stuff has been taken 1373 * care of in c4iw_accept_cr 1374 */ 1375 if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) { 1376 err = -ENOMEM; 1377 ep->ird = resp_ord; 1378 ep->ord = resp_ird; 1379 insuff_ird = 1; 1380 } 1381 1382 if (ntohs(mpa_v2_params->ird) & 1383 MPA_V2_PEER2PEER_MODEL) { 1384 if (ntohs(mpa_v2_params->ord) & 1385 MPA_V2_RDMA_WRITE_RTR) 1386 ep->mpa_attr.p2p_type = 1387 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 1388 else if (ntohs(mpa_v2_params->ord) & 1389 MPA_V2_RDMA_READ_RTR) 1390 ep->mpa_attr.p2p_type = 1391 FW_RI_INIT_P2PTYPE_READ_REQ; 1392 } 1393 } 1394 } else if (mpa->revision == 1) 1395 if (peer2peer) 1396 ep->mpa_attr.p2p_type = p2p_type; 1397 1398 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " 1399 "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = " 1400 "%d\n", __func__, ep->mpa_attr.crc_enabled, 1401 ep->mpa_attr.recv_marker_enabled, 1402 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 1403 ep->mpa_attr.p2p_type, p2p_type); 1404 1405 /* 1406 * If responder's RTR does not match with that of initiator, assign 1407 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not 1408 * generated when moving QP to RTS state. 
1409 * A TERM message will be sent after QP has moved to RTS state 1410 */ 1411 if ((ep->mpa_attr.version == 2) && peer2peer && 1412 (ep->mpa_attr.p2p_type != p2p_type)) { 1413 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1414 rtr_mismatch = 1; 1415 } 1416 1417 attrs.mpa_attr = ep->mpa_attr; 1418 attrs.max_ird = ep->ird; 1419 attrs.max_ord = ep->ord; 1420 attrs.llp_stream_handle = ep; 1421 attrs.next_state = C4IW_QP_STATE_RTS; 1422 1423 mask = C4IW_QP_ATTR_NEXT_STATE | 1424 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | 1425 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; 1426 1427 /* bind QP and TID with INIT_WR */ 1428 err = c4iw_modify_qp(ep->com.qp->rhp, 1429 ep->com.qp, mask, &attrs, 1); 1430 if (err) 1431 goto err; 1432 1433 /* 1434 * If responder's RTR requirement did not match with what initiator 1435 * supports, generate TERM message 1436 */ 1437 if (rtr_mismatch) { 1438 printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__); 1439 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1440 attrs.ecode = MPA_NOMATCH_RTR; 1441 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1442 attrs.send_term = 1; 1443 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1444 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1445 err = -ENOMEM; 1446 disconnect = 1; 1447 goto out; 1448 } 1449 1450 /* 1451 * Generate TERM if initiator IRD is not sufficient for responder 1452 * provided ORD. Currently, we do the same behaviour even when 1453 * responder provided IRD is also not sufficient as regards to 1454 * initiator ORD. 1455 */ 1456 if (insuff_ird) { 1457 printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n", 1458 __func__); 1459 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1460 attrs.ecode = MPA_INSUFF_IRD; 1461 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1462 attrs.send_term = 1; 1463 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1464 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1465 err = -ENOMEM; 1466 disconnect = 1; 1467 goto out; 1468 } 1469 goto out; 1470 err: 1471 __state_set(&ep->com, ABORTING); 1472 send_abort(ep, skb, GFP_KERNEL); 1473 out: 1474 connect_reply_upcall(ep, err); 1475 return disconnect; 1476 } 1477 1478 static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) 1479 { 1480 struct mpa_message *mpa; 1481 struct mpa_v2_conn_params *mpa_v2_params; 1482 u16 plen; 1483 1484 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1485 1486 /* 1487 * If we get more than the supported amount of private data 1488 * then we must fail this connection. 1489 */ 1490 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { 1491 (void)stop_ep_timer(ep); 1492 abort_connection(ep, skb, GFP_KERNEL); 1493 return; 1494 } 1495 1496 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); 1497 1498 /* 1499 * Copy the new data into our accumulation buffer. 1500 */ 1501 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 1502 skb->len); 1503 ep->mpa_pkt_len += skb->len; 1504 1505 /* 1506 * If we don't even have the mpa message, then bail. 1507 * We'll continue process when more data arrives. 1508 */ 1509 if (ep->mpa_pkt_len < sizeof(*mpa)) 1510 return; 1511 1512 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); 1513 mpa = (struct mpa_message *) ep->mpa_pkt; 1514 1515 /* 1516 * Validate MPA Header. 1517 */ 1518 if (mpa->revision > mpa_rev) { 1519 printk(KERN_ERR MOD "%s MPA version mismatch. 
Local = %d," 1520 " Received = %d\n", __func__, mpa_rev, mpa->revision); 1521 (void)stop_ep_timer(ep); 1522 abort_connection(ep, skb, GFP_KERNEL); 1523 return; 1524 } 1525 1526 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) { 1527 (void)stop_ep_timer(ep); 1528 abort_connection(ep, skb, GFP_KERNEL); 1529 return; 1530 } 1531 1532 plen = ntohs(mpa->private_data_size); 1533 1534 /* 1535 * Fail if there's too much private data. 1536 */ 1537 if (plen > MPA_MAX_PRIVATE_DATA) { 1538 (void)stop_ep_timer(ep); 1539 abort_connection(ep, skb, GFP_KERNEL); 1540 return; 1541 } 1542 1543 /* 1544 * If plen does not account for pkt size 1545 */ 1546 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 1547 (void)stop_ep_timer(ep); 1548 abort_connection(ep, skb, GFP_KERNEL); 1549 return; 1550 } 1551 ep->plen = (u8) plen; 1552 1553 /* 1554 * If we don't have all the pdata yet, then bail. 1555 */ 1556 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 1557 return; 1558 1559 /* 1560 * If we get here we have accumulated the entire mpa 1561 * start reply message including private data. 1562 */ 1563 ep->mpa_attr.initiator = 0; 1564 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1565 ep->mpa_attr.recv_marker_enabled = markers_enabled; 1566 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1567 ep->mpa_attr.version = mpa->revision; 1568 if (mpa->revision == 1) 1569 ep->tried_with_mpa_v1 = 1; 1570 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1571 1572 if (mpa->revision == 2) { 1573 ep->mpa_attr.enhanced_rdma_conn = 1574 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; 1575 if (ep->mpa_attr.enhanced_rdma_conn) { 1576 mpa_v2_params = (struct mpa_v2_conn_params *) 1577 (ep->mpa_pkt + sizeof(*mpa)); 1578 ep->ird = ntohs(mpa_v2_params->ird) & 1579 MPA_V2_IRD_ORD_MASK; 1580 ep->ord = ntohs(mpa_v2_params->ord) & 1581 MPA_V2_IRD_ORD_MASK; 1582 if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) 1583 if (peer2peer) { 1584 if (ntohs(mpa_v2_params->ord) & 1585 MPA_V2_RDMA_WRITE_RTR) 1586 ep->mpa_attr.p2p_type = 1587 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 1588 else if (ntohs(mpa_v2_params->ord) & 1589 MPA_V2_RDMA_READ_RTR) 1590 ep->mpa_attr.p2p_type = 1591 FW_RI_INIT_P2PTYPE_READ_REQ; 1592 } 1593 } 1594 } else if (mpa->revision == 1) 1595 if (peer2peer) 1596 ep->mpa_attr.p2p_type = p2p_type; 1597 1598 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " 1599 "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__, 1600 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 1601 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 1602 ep->mpa_attr.p2p_type); 1603 1604 /* 1605 * If the endpoint timer already expired, then we ignore 1606 * the start request. process_timeout() will abort 1607 * the connection. 
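 * stop_ep_timer() returns non-zero in that case, so the MPA_REQ_RCVD
 * transition and the connect_request upcall below are skipped and the
 * timeout path keeps ownership of the endpoint.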
1608 */ 1609 if (!stop_ep_timer(ep)) { 1610 __state_set(&ep->com, MPA_REQ_RCVD); 1611 1612 /* drive upcall */ 1613 mutex_lock(&ep->parent_ep->com.mutex); 1614 if (ep->parent_ep->com.state != DEAD) { 1615 if (connect_request_upcall(ep)) 1616 abort_connection(ep, skb, GFP_KERNEL); 1617 } else { 1618 abort_connection(ep, skb, GFP_KERNEL); 1619 } 1620 mutex_unlock(&ep->parent_ep->com.mutex); 1621 } 1622 return; 1623 } 1624 1625 static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) 1626 { 1627 struct c4iw_ep *ep; 1628 struct cpl_rx_data *hdr = cplhdr(skb); 1629 unsigned int dlen = ntohs(hdr->len); 1630 unsigned int tid = GET_TID(hdr); 1631 struct tid_info *t = dev->rdev.lldi.tids; 1632 __u8 status = hdr->status; 1633 int disconnect = 0; 1634 1635 ep = lookup_tid(t, tid); 1636 if (!ep) 1637 return 0; 1638 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen); 1639 skb_pull(skb, sizeof(*hdr)); 1640 skb_trim(skb, dlen); 1641 mutex_lock(&ep->com.mutex); 1642 1643 /* update RX credits */ 1644 update_rx_credits(ep, dlen); 1645 1646 switch (ep->com.state) { 1647 case MPA_REQ_SENT: 1648 ep->rcv_seq += dlen; 1649 disconnect = process_mpa_reply(ep, skb); 1650 break; 1651 case MPA_REQ_WAIT: 1652 ep->rcv_seq += dlen; 1653 process_mpa_request(ep, skb); 1654 break; 1655 case FPDU_MODE: { 1656 struct c4iw_qp_attributes attrs; 1657 BUG_ON(!ep->com.qp); 1658 if (status) 1659 pr_err("%s Unexpected streaming data." \ 1660 " qpid %u ep %p state %d tid %u status %d\n", 1661 __func__, ep->com.qp->wq.sq.qid, ep, 1662 ep->com.state, ep->hwtid, status); 1663 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1664 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1665 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1666 disconnect = 1; 1667 break; 1668 } 1669 default: 1670 break; 1671 } 1672 mutex_unlock(&ep->com.mutex); 1673 if (disconnect) 1674 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 1675 return 0; 1676 } 1677 1678 static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1679 { 1680 struct c4iw_ep *ep; 1681 struct cpl_abort_rpl_rss *rpl = cplhdr(skb); 1682 int release = 0; 1683 unsigned int tid = GET_TID(rpl); 1684 struct tid_info *t = dev->rdev.lldi.tids; 1685 1686 ep = lookup_tid(t, tid); 1687 if (!ep) { 1688 printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n"); 1689 return 0; 1690 } 1691 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1692 mutex_lock(&ep->com.mutex); 1693 switch (ep->com.state) { 1694 case ABORTING: 1695 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 1696 __state_set(&ep->com, DEAD); 1697 release = 1; 1698 break; 1699 default: 1700 printk(KERN_ERR "%s ep %p state %d\n", 1701 __func__, ep, ep->com.state); 1702 break; 1703 } 1704 mutex_unlock(&ep->com.mutex); 1705 1706 if (release) 1707 release_ep_resources(ep); 1708 return 0; 1709 } 1710 1711 static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) 1712 { 1713 struct sk_buff *skb; 1714 struct fw_ofld_connection_wr *req; 1715 unsigned int mtu_idx; 1716 int wscale; 1717 struct sockaddr_in *sin; 1718 int win; 1719 1720 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 1721 req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req)); 1722 memset(req, 0, sizeof(*req)); 1723 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR)); 1724 req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); 1725 req->le.filter = cpu_to_be32(cxgb4_select_ntuple( 1726 ep->com.dev->rdev.lldi.ports[0], 1727 ep->l2t)); 1728 sin = (struct sockaddr_in *)&ep->com.mapped_local_addr; 1729 req->le.lport = sin->sin_port; 1730 req->le.u.ipv4.lip = 
sin->sin_addr.s_addr; 1731 sin = (struct sockaddr_in *)&ep->com.mapped_remote_addr; 1732 req->le.pport = sin->sin_port; 1733 req->le.u.ipv4.pip = sin->sin_addr.s_addr; 1734 req->tcb.t_state_to_astid = 1735 htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) | 1736 V_FW_OFLD_CONNECTION_WR_ASTID(atid)); 1737 req->tcb.cplrxdataack_cplpassacceptrpl = 1738 htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK); 1739 req->tcb.tx_max = (__force __be32) jiffies; 1740 req->tcb.rcv_adv = htons(1); 1741 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, 1742 enable_tcp_timestamps); 1743 wscale = compute_wscale(rcv_win); 1744 1745 /* 1746 * Specify the largest window that will fit in opt0. The 1747 * remainder will be specified in the rx_data_ack. 1748 */ 1749 win = ep->rcv_win >> 10; 1750 if (win > RCV_BUFSIZ_MASK) 1751 win = RCV_BUFSIZ_MASK; 1752 1753 req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) | 1754 (nocong ? NO_CONG(1) : 0) | 1755 KEEP_ALIVE(1) | 1756 DELACK(1) | 1757 WND_SCALE(wscale) | 1758 MSS_IDX(mtu_idx) | 1759 L2T_IDX(ep->l2t->idx) | 1760 TX_CHAN(ep->tx_chan) | 1761 SMAC_SEL(ep->smac_idx) | 1762 DSCP(ep->tos) | 1763 ULP_MODE(ULP_MODE_TCPDDP) | 1764 RCV_BUFSIZ(win)); 1765 req->tcb.opt2 = (__force __be32) (PACE(1) | 1766 TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) | 1767 RX_CHANNEL(0) | 1768 CCTRL_ECN(enable_ecn) | 1769 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid)); 1770 if (enable_tcp_timestamps) 1771 req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1); 1772 if (enable_tcp_sack) 1773 req->tcb.opt2 |= (__force __be32) SACK_EN(1); 1774 if (wscale && enable_tcp_window_scaling) 1775 req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1); 1776 req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0); 1777 req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2); 1778 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx); 1779 set_bit(ACT_OFLD_CONN, &ep->com.history); 1780 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1781 } 1782 1783 /* 1784 * Return whether a failed active open has allocated a TID 1785 */ 1786 static inline int act_open_has_tid(int status) 1787 { 1788 return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && 1789 status != CPL_ERR_ARP_MISS; 1790 } 1791 1792 /* Returns whether a CPL status conveys negative advice. 
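 * Negative advice (retransmit/persist/keepalive warnings) means the
 * connection may still recover, so callers just log it and carry on rather
 * than tearing the endpoint down.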
1793 */ 1794 static int is_neg_adv(unsigned int status) 1795 { 1796 return status == CPL_ERR_RTX_NEG_ADVICE || 1797 status == CPL_ERR_PERSIST_NEG_ADVICE || 1798 status == CPL_ERR_KEEPALV_NEG_ADVICE; 1799 } 1800 1801 static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi) 1802 { 1803 ep->snd_win = snd_win; 1804 ep->rcv_win = rcv_win; 1805 PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win); 1806 } 1807 1808 #define ACT_OPEN_RETRY_COUNT 2 1809 1810 static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, 1811 struct dst_entry *dst, struct c4iw_dev *cdev, 1812 bool clear_mpa_v1) 1813 { 1814 struct neighbour *n; 1815 int err, step; 1816 struct net_device *pdev; 1817 1818 n = dst_neigh_lookup(dst, peer_ip); 1819 if (!n) 1820 return -ENODEV; 1821 1822 rcu_read_lock(); 1823 err = -ENOMEM; 1824 if (n->dev->flags & IFF_LOOPBACK) { 1825 if (iptype == 4) 1826 pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip); 1827 else if (IS_ENABLED(CONFIG_IPV6)) 1828 for_each_netdev(&init_net, pdev) { 1829 if (ipv6_chk_addr(&init_net, 1830 (struct in6_addr *)peer_ip, 1831 pdev, 1)) 1832 break; 1833 } 1834 else 1835 pdev = NULL; 1836 1837 if (!pdev) { 1838 err = -ENODEV; 1839 goto out; 1840 } 1841 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, 1842 n, pdev, 0); 1843 if (!ep->l2t) 1844 goto out; 1845 ep->mtu = pdev->mtu; 1846 ep->tx_chan = cxgb4_port_chan(pdev); 1847 ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; 1848 step = cdev->rdev.lldi.ntxq / 1849 cdev->rdev.lldi.nchan; 1850 ep->txq_idx = cxgb4_port_idx(pdev) * step; 1851 step = cdev->rdev.lldi.nrxq / 1852 cdev->rdev.lldi.nchan; 1853 ep->ctrlq_idx = cxgb4_port_idx(pdev); 1854 ep->rss_qid = cdev->rdev.lldi.rxq_ids[ 1855 cxgb4_port_idx(pdev) * step]; 1856 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev)); 1857 dev_put(pdev); 1858 } else { 1859 pdev = get_real_dev(n->dev); 1860 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, 1861 n, pdev, 0); 1862 if (!ep->l2t) 1863 goto out; 1864 ep->mtu = dst_mtu(dst); 1865 ep->tx_chan = cxgb4_port_chan(pdev); 1866 ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; 1867 step = cdev->rdev.lldi.ntxq / 1868 cdev->rdev.lldi.nchan; 1869 ep->txq_idx = cxgb4_port_idx(pdev) * step; 1870 ep->ctrlq_idx = cxgb4_port_idx(pdev); 1871 step = cdev->rdev.lldi.nrxq / 1872 cdev->rdev.lldi.nchan; 1873 ep->rss_qid = cdev->rdev.lldi.rxq_ids[ 1874 cxgb4_port_idx(pdev) * step]; 1875 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev)); 1876 1877 if (clear_mpa_v1) { 1878 ep->retry_with_mpa_v1 = 0; 1879 ep->tried_with_mpa_v1 = 0; 1880 } 1881 } 1882 err = 0; 1883 out: 1884 rcu_read_unlock(); 1885 1886 neigh_release(n); 1887 1888 return err; 1889 } 1890 1891 static int c4iw_reconnect(struct c4iw_ep *ep) 1892 { 1893 int err = 0; 1894 struct sockaddr_in *laddr = (struct sockaddr_in *) 1895 &ep->com.cm_id->local_addr; 1896 struct sockaddr_in *raddr = (struct sockaddr_in *) 1897 &ep->com.cm_id->remote_addr; 1898 struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *) 1899 &ep->com.cm_id->local_addr; 1900 struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *) 1901 &ep->com.cm_id->remote_addr; 1902 int iptype; 1903 __u8 *ra; 1904 1905 PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); 1906 init_timer(&ep->timer); 1907 1908 /* 1909 * Allocate an active TID to initiate a TCP connection. 
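 * The atid only identifies the connection while it is being set up; once
 * CPL_ACT_ESTABLISH arrives, act_establish() frees the atid and switches
 * over to the hardware tid.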
1910 */ 1911 ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); 1912 if (ep->atid == -1) { 1913 pr_err("%s - cannot alloc atid.\n", __func__); 1914 err = -ENOMEM; 1915 goto fail2; 1916 } 1917 insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid); 1918 1919 /* find a route */ 1920 if (ep->com.cm_id->local_addr.ss_family == AF_INET) { 1921 ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr, 1922 raddr->sin_addr.s_addr, laddr->sin_port, 1923 raddr->sin_port, 0); 1924 iptype = 4; 1925 ra = (__u8 *)&raddr->sin_addr; 1926 } else { 1927 ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr, 1928 raddr6->sin6_addr.s6_addr, 1929 laddr6->sin6_port, raddr6->sin6_port, 0, 1930 raddr6->sin6_scope_id); 1931 iptype = 6; 1932 ra = (__u8 *)&raddr6->sin6_addr; 1933 } 1934 if (!ep->dst) { 1935 pr_err("%s - cannot find route.\n", __func__); 1936 err = -EHOSTUNREACH; 1937 goto fail3; 1938 } 1939 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false); 1940 if (err) { 1941 pr_err("%s - cannot alloc l2e.\n", __func__); 1942 goto fail4; 1943 } 1944 1945 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 1946 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 1947 ep->l2t->idx); 1948 1949 state_set(&ep->com, CONNECTING); 1950 ep->tos = 0; 1951 1952 /* send connect request to rnic */ 1953 err = send_connect(ep); 1954 if (!err) 1955 goto out; 1956 1957 cxgb4_l2t_release(ep->l2t); 1958 fail4: 1959 dst_release(ep->dst); 1960 fail3: 1961 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 1962 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 1963 fail2: 1964 /* 1965 * remember to send notification to upper layer. 1966 * We are in here so the upper layer is not aware that this is 1967 * re-connect attempt and so, upper layer is still waiting for 1968 * response of 1st connect request. 1969 */ 1970 connect_reply_upcall(ep, -ECONNRESET); 1971 c4iw_put_ep(&ep->com); 1972 out: 1973 return err; 1974 } 1975 1976 static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1977 { 1978 struct c4iw_ep *ep; 1979 struct cpl_act_open_rpl *rpl = cplhdr(skb); 1980 unsigned int atid = GET_TID_TID(GET_AOPEN_ATID( 1981 ntohl(rpl->atid_status))); 1982 struct tid_info *t = dev->rdev.lldi.tids; 1983 int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status)); 1984 struct sockaddr_in *la; 1985 struct sockaddr_in *ra; 1986 struct sockaddr_in6 *la6; 1987 struct sockaddr_in6 *ra6; 1988 1989 ep = lookup_atid(t, atid); 1990 la = (struct sockaddr_in *)&ep->com.mapped_local_addr; 1991 ra = (struct sockaddr_in *)&ep->com.mapped_remote_addr; 1992 la6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr; 1993 ra6 = (struct sockaddr_in6 *)&ep->com.mapped_remote_addr; 1994 1995 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, 1996 status, status2errno(status)); 1997 1998 if (is_neg_adv(status)) { 1999 printk(KERN_WARNING MOD "Connection problems for atid %u\n", 2000 atid); 2001 return 0; 2002 } 2003 2004 set_bit(ACT_OPEN_RPL, &ep->com.history); 2005 2006 /* 2007 * Log interesting failures. 
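 * CPL_ERR_TCAM_FULL may be retried via a firmware offload-connection work request and CPL_ERR_CONN_EXIST is retried up to ACT_OPEN_RETRY_COUNT times; failures that are not retried are reported upward via connect_reply_upcall().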
2008 */ 2009 switch (status) { 2010 case CPL_ERR_CONN_RESET: 2011 case CPL_ERR_CONN_TIMEDOUT: 2012 break; 2013 case CPL_ERR_TCAM_FULL: 2014 mutex_lock(&dev->rdev.stats.lock); 2015 dev->rdev.stats.tcam_full++; 2016 mutex_unlock(&dev->rdev.stats.lock); 2017 if (ep->com.local_addr.ss_family == AF_INET && 2018 dev->rdev.lldi.enable_fw_ofld_conn) { 2019 send_fw_act_open_req(ep, 2020 GET_TID_TID(GET_AOPEN_ATID( 2021 ntohl(rpl->atid_status)))); 2022 return 0; 2023 } 2024 break; 2025 case CPL_ERR_CONN_EXIST: 2026 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 2027 set_bit(ACT_RETRY_INUSE, &ep->com.history); 2028 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, 2029 atid); 2030 cxgb4_free_atid(t, atid); 2031 dst_release(ep->dst); 2032 cxgb4_l2t_release(ep->l2t); 2033 c4iw_reconnect(ep); 2034 return 0; 2035 } 2036 break; 2037 default: 2038 if (ep->com.local_addr.ss_family == AF_INET) { 2039 pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n", 2040 atid, status, status2errno(status), 2041 &la->sin_addr.s_addr, ntohs(la->sin_port), 2042 &ra->sin_addr.s_addr, ntohs(ra->sin_port)); 2043 } else { 2044 pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n", 2045 atid, status, status2errno(status), 2046 la6->sin6_addr.s6_addr, ntohs(la6->sin6_port), 2047 ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port)); 2048 } 2049 break; 2050 } 2051 2052 connect_reply_upcall(ep, status2errno(status)); 2053 state_set(&ep->com, DEAD); 2054 2055 if (status && act_open_has_tid(status)) 2056 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); 2057 2058 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); 2059 cxgb4_free_atid(t, atid); 2060 dst_release(ep->dst); 2061 cxgb4_l2t_release(ep->l2t); 2062 c4iw_put_ep(&ep->com); 2063 2064 return 0; 2065 } 2066 2067 static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2068 { 2069 struct cpl_pass_open_rpl *rpl = cplhdr(skb); 2070 struct tid_info *t = dev->rdev.lldi.tids; 2071 unsigned int stid = GET_TID(rpl); 2072 struct c4iw_listen_ep *ep = lookup_stid(t, stid); 2073 2074 if (!ep) { 2075 PDBG("%s stid %d lookup failure!\n", __func__, stid); 2076 goto out; 2077 } 2078 PDBG("%s ep %p status %d error %d\n", __func__, ep, 2079 rpl->status, status2errno(rpl->status)); 2080 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); 2081 2082 out: 2083 return 0; 2084 } 2085 2086 static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2087 { 2088 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); 2089 struct tid_info *t = dev->rdev.lldi.tids; 2090 unsigned int stid = GET_TID(rpl); 2091 struct c4iw_listen_ep *ep = lookup_stid(t, stid); 2092 2093 PDBG("%s ep %p\n", __func__, ep); 2094 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); 2095 return 0; 2096 } 2097 2098 static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, 2099 struct cpl_pass_accept_req *req) 2100 { 2101 struct cpl_pass_accept_rpl *rpl; 2102 unsigned int mtu_idx; 2103 u64 opt0; 2104 u32 opt2; 2105 int wscale; 2106 struct cpl_t5_pass_accept_rpl *rpl5 = NULL; 2107 int win; 2108 2109 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2110 BUG_ON(skb_cloned(skb)); 2111 2112 skb_get(skb); 2113 rpl = cplhdr(skb); 2114 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { 2115 skb_trim(skb, roundup(sizeof(*rpl5), 16)); 2116 rpl5 = (void *)rpl; 2117 INIT_TP_WR(rpl5, ep->hwtid); 2118 } else { 2119 skb_trim(skb, sizeof(*rpl)); 2120 INIT_TP_WR(rpl, ep->hwtid); 2121 } 2122 OPCODE_TID(rpl) = 
cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, 2123 ep->hwtid)); 2124 2125 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, 2126 enable_tcp_timestamps && req->tcpopt.tstamp); 2127 wscale = compute_wscale(rcv_win); 2128 2129 /* 2130 * Specify the largest window that will fit in opt0. The 2131 * remainder will be specified in the rx_data_ack. 2132 */ 2133 win = ep->rcv_win >> 10; 2134 if (win > RCV_BUFSIZ_MASK) 2135 win = RCV_BUFSIZ_MASK; 2136 opt0 = (nocong ? NO_CONG(1) : 0) | 2137 KEEP_ALIVE(1) | 2138 DELACK(1) | 2139 WND_SCALE(wscale) | 2140 MSS_IDX(mtu_idx) | 2141 L2T_IDX(ep->l2t->idx) | 2142 TX_CHAN(ep->tx_chan) | 2143 SMAC_SEL(ep->smac_idx) | 2144 DSCP(ep->tos >> 2) | 2145 ULP_MODE(ULP_MODE_TCPDDP) | 2146 RCV_BUFSIZ(win); 2147 opt2 = RX_CHANNEL(0) | 2148 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 2149 2150 if (enable_tcp_timestamps && req->tcpopt.tstamp) 2151 opt2 |= TSTAMPS_EN(1); 2152 if (enable_tcp_sack && req->tcpopt.sack) 2153 opt2 |= SACK_EN(1); 2154 if (wscale && enable_tcp_window_scaling) 2155 opt2 |= WND_SCALE_EN(1); 2156 if (enable_ecn) { 2157 const struct tcphdr *tcph; 2158 u32 hlen = ntohl(req->hdr_len); 2159 2160 tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) + 2161 G_IP_HDR_LEN(hlen); 2162 if (tcph->ece && tcph->cwr) 2163 opt2 |= CCTRL_ECN(1); 2164 } 2165 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { 2166 u32 isn = (prandom_u32() & ~7UL) - 1; 2167 opt2 |= T5_OPT_2_VALID; 2168 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); 2169 opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */ 2170 rpl5 = (void *)rpl; 2171 memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16)); 2172 if (peer2peer) 2173 isn += 4; 2174 rpl5->iss = cpu_to_be32(isn); 2175 PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss)); 2176 } 2177 2178 rpl->opt0 = cpu_to_be64(opt0); 2179 rpl->opt2 = cpu_to_be32(opt2); 2180 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 2181 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 2182 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 2183 2184 return; 2185 } 2186 2187 static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb) 2188 { 2189 PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid); 2190 BUG_ON(skb_cloned(skb)); 2191 skb_trim(skb, sizeof(struct cpl_tid_release)); 2192 release_tid(&dev->rdev, hwtid, skb); 2193 return; 2194 } 2195 2196 static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype, 2197 __u8 *local_ip, __u8 *peer_ip, 2198 __be16 *local_port, __be16 *peer_port) 2199 { 2200 int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len)); 2201 int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len)); 2202 struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len); 2203 struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len); 2204 struct tcphdr *tcp = (struct tcphdr *) 2205 ((u8 *)(req + 1) + eth_len + ip_len); 2206 2207 if (ip->version == 4) { 2208 PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__, 2209 ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source), 2210 ntohs(tcp->dest)); 2211 *iptype = 4; 2212 memcpy(peer_ip, &ip->saddr, 4); 2213 memcpy(local_ip, &ip->daddr, 4); 2214 } else { 2215 PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__, 2216 ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source), 2217 ntohs(tcp->dest)); 2218 *iptype = 6; 2219 memcpy(peer_ip, ip6->saddr.s6_addr, 16); 2220 memcpy(local_ip, ip6->daddr.s6_addr, 16); 2221 } 2222 *peer_port = tcp->source; 2223 *local_port = tcp->dest; 2224 2225 return; 2226 } 2227 2228 static int pass_accept_req(struct c4iw_dev 
*dev, struct sk_buff *skb) 2229 { 2230 struct c4iw_ep *child_ep = NULL, *parent_ep; 2231 struct cpl_pass_accept_req *req = cplhdr(skb); 2232 unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid)); 2233 struct tid_info *t = dev->rdev.lldi.tids; 2234 unsigned int hwtid = GET_TID(req); 2235 struct dst_entry *dst; 2236 __u8 local_ip[16], peer_ip[16]; 2237 __be16 local_port, peer_port; 2238 int err; 2239 u16 peer_mss = ntohs(req->tcpopt.mss); 2240 int iptype; 2241 unsigned short hdrs; 2242 2243 parent_ep = lookup_stid(t, stid); 2244 if (!parent_ep) { 2245 PDBG("%s connect request on invalid stid %d\n", __func__, stid); 2246 goto reject; 2247 } 2248 2249 if (state_read(&parent_ep->com) != LISTEN) { 2250 printk(KERN_ERR "%s - listening ep not in LISTEN\n", 2251 __func__); 2252 goto reject; 2253 } 2254 2255 get_4tuple(req, &iptype, local_ip, peer_ip, &local_port, &peer_port); 2256 2257 /* Find output route */ 2258 if (iptype == 4) { 2259 PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n" 2260 , __func__, parent_ep, hwtid, 2261 local_ip, peer_ip, ntohs(local_port), 2262 ntohs(peer_port), peer_mss); 2263 dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip, 2264 local_port, peer_port, 2265 GET_POPEN_TOS(ntohl(req->tos_stid))); 2266 } else { 2267 PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n" 2268 , __func__, parent_ep, hwtid, 2269 local_ip, peer_ip, ntohs(local_port), 2270 ntohs(peer_port), peer_mss); 2271 dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port, 2272 PASS_OPEN_TOS(ntohl(req->tos_stid)), 2273 ((struct sockaddr_in6 *) 2274 &parent_ep->com.local_addr)->sin6_scope_id); 2275 } 2276 if (!dst) { 2277 printk(KERN_ERR MOD "%s - failed to find dst entry!\n", 2278 __func__); 2279 goto reject; 2280 } 2281 2282 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); 2283 if (!child_ep) { 2284 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", 2285 __func__); 2286 dst_release(dst); 2287 goto reject; 2288 } 2289 2290 err = import_ep(child_ep, iptype, peer_ip, dst, dev, false); 2291 if (err) { 2292 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", 2293 __func__); 2294 dst_release(dst); 2295 kfree(child_ep); 2296 goto reject; 2297 } 2298 2299 hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) + 2300 ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 
12 : 0); 2301 if (peer_mss && child_ep->mtu > (peer_mss + hdrs)) 2302 child_ep->mtu = peer_mss + hdrs; 2303 2304 state_set(&child_ep->com, CONNECTING); 2305 child_ep->com.dev = dev; 2306 child_ep->com.cm_id = NULL; 2307 if (iptype == 4) { 2308 struct sockaddr_in *sin = (struct sockaddr_in *) 2309 &child_ep->com.local_addr; 2310 sin->sin_family = PF_INET; 2311 sin->sin_port = local_port; 2312 sin->sin_addr.s_addr = *(__be32 *)local_ip; 2313 sin = (struct sockaddr_in *)&child_ep->com.remote_addr; 2314 sin->sin_family = PF_INET; 2315 sin->sin_port = peer_port; 2316 sin->sin_addr.s_addr = *(__be32 *)peer_ip; 2317 } else { 2318 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) 2319 &child_ep->com.local_addr; 2320 sin6->sin6_family = PF_INET6; 2321 sin6->sin6_port = local_port; 2322 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); 2323 sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr; 2324 sin6->sin6_family = PF_INET6; 2325 sin6->sin6_port = peer_port; 2326 memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16); 2327 } 2328 c4iw_get_ep(&parent_ep->com); 2329 child_ep->parent_ep = parent_ep; 2330 child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid)); 2331 child_ep->dst = dst; 2332 child_ep->hwtid = hwtid; 2333 2334 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__, 2335 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid); 2336 2337 init_timer(&child_ep->timer); 2338 cxgb4_insert_tid(t, child_ep, hwtid); 2339 insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid); 2340 accept_cr(child_ep, skb, req); 2341 set_bit(PASS_ACCEPT_REQ, &child_ep->com.history); 2342 goto out; 2343 reject: 2344 reject_cr(dev, hwtid, skb); 2345 out: 2346 return 0; 2347 } 2348 2349 static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb) 2350 { 2351 struct c4iw_ep *ep; 2352 struct cpl_pass_establish *req = cplhdr(skb); 2353 struct tid_info *t = dev->rdev.lldi.tids; 2354 unsigned int tid = GET_TID(req); 2355 2356 ep = lookup_tid(t, tid); 2357 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2358 ep->snd_seq = be32_to_cpu(req->snd_isn); 2359 ep->rcv_seq = be32_to_cpu(req->rcv_isn); 2360 2361 PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid, 2362 ntohs(req->tcp_opt)); 2363 2364 set_emss(ep, ntohs(req->tcp_opt)); 2365 2366 dst_confirm(ep->dst); 2367 state_set(&ep->com, MPA_REQ_WAIT); 2368 start_ep_timer(ep); 2369 send_flowc(ep, skb); 2370 set_bit(PASS_ESTAB, &ep->com.history); 2371 2372 return 0; 2373 } 2374 2375 static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) 2376 { 2377 struct cpl_peer_close *hdr = cplhdr(skb); 2378 struct c4iw_ep *ep; 2379 struct c4iw_qp_attributes attrs; 2380 int disconnect = 1; 2381 int release = 0; 2382 struct tid_info *t = dev->rdev.lldi.tids; 2383 unsigned int tid = GET_TID(hdr); 2384 int ret; 2385 2386 ep = lookup_tid(t, tid); 2387 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2388 dst_confirm(ep->dst); 2389 2390 set_bit(PEER_CLOSE, &ep->com.history); 2391 mutex_lock(&ep->com.mutex); 2392 switch (ep->com.state) { 2393 case MPA_REQ_WAIT: 2394 __state_set(&ep->com, CLOSING); 2395 break; 2396 case MPA_REQ_SENT: 2397 __state_set(&ep->com, CLOSING); 2398 connect_reply_upcall(ep, -ECONNRESET); 2399 break; 2400 case MPA_REQ_RCVD: 2401 2402 /* 2403 * We're gonna mark this puppy DEAD, but keep 2404 * the reference on it until the ULP accepts or 2405 * rejects the CR. Also wake up anyone waiting 2406 * in rdma connection migration (see c4iw_accept_cr()). 
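 * The waiter is woken with -ECONNRESET so that the accept path sees the failure.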
2407 */ 2408 __state_set(&ep->com, CLOSING); 2409 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 2410 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 2411 break; 2412 case MPA_REP_SENT: 2413 __state_set(&ep->com, CLOSING); 2414 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 2415 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 2416 break; 2417 case FPDU_MODE: 2418 start_ep_timer(ep); 2419 __state_set(&ep->com, CLOSING); 2420 attrs.next_state = C4IW_QP_STATE_CLOSING; 2421 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2422 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2423 if (ret != -ECONNRESET) { 2424 peer_close_upcall(ep); 2425 disconnect = 1; 2426 } 2427 break; 2428 case ABORTING: 2429 disconnect = 0; 2430 break; 2431 case CLOSING: 2432 __state_set(&ep->com, MORIBUND); 2433 disconnect = 0; 2434 break; 2435 case MORIBUND: 2436 (void)stop_ep_timer(ep); 2437 if (ep->com.cm_id && ep->com.qp) { 2438 attrs.next_state = C4IW_QP_STATE_IDLE; 2439 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2440 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2441 } 2442 close_complete_upcall(ep, 0); 2443 __state_set(&ep->com, DEAD); 2444 release = 1; 2445 disconnect = 0; 2446 break; 2447 case DEAD: 2448 disconnect = 0; 2449 break; 2450 default: 2451 BUG_ON(1); 2452 } 2453 mutex_unlock(&ep->com.mutex); 2454 if (disconnect) 2455 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 2456 if (release) 2457 release_ep_resources(ep); 2458 return 0; 2459 } 2460 2461 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) 2462 { 2463 struct cpl_abort_req_rss *req = cplhdr(skb); 2464 struct c4iw_ep *ep; 2465 struct cpl_abort_rpl *rpl; 2466 struct sk_buff *rpl_skb; 2467 struct c4iw_qp_attributes attrs; 2468 int ret; 2469 int release = 0; 2470 struct tid_info *t = dev->rdev.lldi.tids; 2471 unsigned int tid = GET_TID(req); 2472 2473 ep = lookup_tid(t, tid); 2474 if (is_neg_adv(req->status)) { 2475 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, 2476 ep->hwtid); 2477 return 0; 2478 } 2479 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, 2480 ep->com.state); 2481 set_bit(PEER_ABORT, &ep->com.history); 2482 2483 /* 2484 * Wake up any threads in rdma_init() or rdma_fini(). 2485 * However, this is not needed if com state is just 2486 * MPA_REQ_SENT 2487 */ 2488 if (ep->com.state != MPA_REQ_SENT) 2489 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 2490 2491 mutex_lock(&ep->com.mutex); 2492 switch (ep->com.state) { 2493 case CONNECTING: 2494 break; 2495 case MPA_REQ_WAIT: 2496 (void)stop_ep_timer(ep); 2497 break; 2498 case MPA_REQ_SENT: 2499 (void)stop_ep_timer(ep); 2500 if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1)) 2501 connect_reply_upcall(ep, -ECONNRESET); 2502 else { 2503 /* 2504 * we just don't send notification upwards because we 2505 * want to retry with mpa_v1 without upper layers even 2506 * knowing it. 2507 * 2508 * do some housekeeping so as to re-initiate the 2509 * connection 2510 */ 2511 PDBG("%s: mpa_rev=%d. 
Retrying with mpav1\n", __func__, 2512 mpa_rev); 2513 ep->retry_with_mpa_v1 = 1; 2514 } 2515 break; 2516 case MPA_REP_SENT: 2517 break; 2518 case MPA_REQ_RCVD: 2519 break; 2520 case MORIBUND: 2521 case CLOSING: 2522 stop_ep_timer(ep); 2523 /*FALLTHROUGH*/ 2524 case FPDU_MODE: 2525 if (ep->com.cm_id && ep->com.qp) { 2526 attrs.next_state = C4IW_QP_STATE_ERROR; 2527 ret = c4iw_modify_qp(ep->com.qp->rhp, 2528 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 2529 &attrs, 1); 2530 if (ret) 2531 printk(KERN_ERR MOD 2532 "%s - qp <- error failed!\n", 2533 __func__); 2534 } 2535 peer_abort_upcall(ep); 2536 break; 2537 case ABORTING: 2538 break; 2539 case DEAD: 2540 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); 2541 mutex_unlock(&ep->com.mutex); 2542 return 0; 2543 default: 2544 BUG_ON(1); 2545 break; 2546 } 2547 dst_confirm(ep->dst); 2548 if (ep->com.state != ABORTING) { 2549 __state_set(&ep->com, DEAD); 2550 /* we don't release if we want to retry with mpa_v1 */ 2551 if (!ep->retry_with_mpa_v1) 2552 release = 1; 2553 } 2554 mutex_unlock(&ep->com.mutex); 2555 2556 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); 2557 if (!rpl_skb) { 2558 printk(KERN_ERR MOD "%s - cannot allocate skb!\n", 2559 __func__); 2560 release = 1; 2561 goto out; 2562 } 2563 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 2564 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); 2565 INIT_TP_WR(rpl, ep->hwtid); 2566 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); 2567 rpl->cmd = CPL_ABORT_NO_RST; 2568 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); 2569 out: 2570 if (release) 2571 release_ep_resources(ep); 2572 else if (ep->retry_with_mpa_v1) { 2573 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid); 2574 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); 2575 dst_release(ep->dst); 2576 cxgb4_l2t_release(ep->l2t); 2577 c4iw_reconnect(ep); 2578 } 2579 2580 return 0; 2581 } 2582 2583 static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2584 { 2585 struct c4iw_ep *ep; 2586 struct c4iw_qp_attributes attrs; 2587 struct cpl_close_con_rpl *rpl = cplhdr(skb); 2588 int release = 0; 2589 struct tid_info *t = dev->rdev.lldi.tids; 2590 unsigned int tid = GET_TID(rpl); 2591 2592 ep = lookup_tid(t, tid); 2593 2594 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2595 BUG_ON(!ep); 2596 2597 /* The cm_id may be null if we failed to connect */ 2598 mutex_lock(&ep->com.mutex); 2599 switch (ep->com.state) { 2600 case CLOSING: 2601 __state_set(&ep->com, MORIBUND); 2602 break; 2603 case MORIBUND: 2604 (void)stop_ep_timer(ep); 2605 if ((ep->com.cm_id) && (ep->com.qp)) { 2606 attrs.next_state = C4IW_QP_STATE_IDLE; 2607 c4iw_modify_qp(ep->com.qp->rhp, 2608 ep->com.qp, 2609 C4IW_QP_ATTR_NEXT_STATE, 2610 &attrs, 1); 2611 } 2612 close_complete_upcall(ep, 0); 2613 __state_set(&ep->com, DEAD); 2614 release = 1; 2615 break; 2616 case ABORTING: 2617 case DEAD: 2618 break; 2619 default: 2620 BUG_ON(1); 2621 break; 2622 } 2623 mutex_unlock(&ep->com.mutex); 2624 if (release) 2625 release_ep_resources(ep); 2626 return 0; 2627 } 2628 2629 static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) 2630 { 2631 struct cpl_rdma_terminate *rpl = cplhdr(skb); 2632 struct tid_info *t = dev->rdev.lldi.tids; 2633 unsigned int tid = GET_TID(rpl); 2634 struct c4iw_ep *ep; 2635 struct c4iw_qp_attributes attrs; 2636 2637 ep = lookup_tid(t, tid); 2638 BUG_ON(!ep); 2639 2640 if (ep && ep->com.qp) { 2641 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, 2642 ep->com.qp->wq.sq.qid); 2643 
attrs.next_state = C4IW_QP_STATE_TERMINATE; 2644 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2645 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2646 } else 2647 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); 2648 2649 return 0; 2650 } 2651 2652 /* 2653 * Upcall from the adapter indicating data has been transmitted. 2654 * For us its just the single MPA request or reply. We can now free 2655 * the skb holding the mpa message. 2656 */ 2657 static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) 2658 { 2659 struct c4iw_ep *ep; 2660 struct cpl_fw4_ack *hdr = cplhdr(skb); 2661 u8 credits = hdr->credits; 2662 unsigned int tid = GET_TID(hdr); 2663 struct tid_info *t = dev->rdev.lldi.tids; 2664 2665 2666 ep = lookup_tid(t, tid); 2667 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); 2668 if (credits == 0) { 2669 PDBG("%s 0 credit ack ep %p tid %u state %u\n", 2670 __func__, ep, ep->hwtid, state_read(&ep->com)); 2671 return 0; 2672 } 2673 2674 dst_confirm(ep->dst); 2675 if (ep->mpa_skb) { 2676 PDBG("%s last streaming msg ack ep %p tid %u state %u " 2677 "initiator %u freeing skb\n", __func__, ep, ep->hwtid, 2678 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0); 2679 kfree_skb(ep->mpa_skb); 2680 ep->mpa_skb = NULL; 2681 } 2682 return 0; 2683 } 2684 2685 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 2686 { 2687 int err = 0; 2688 int disconnect = 0; 2689 struct c4iw_ep *ep = to_ep(cm_id); 2690 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2691 2692 mutex_lock(&ep->com.mutex); 2693 if (ep->com.state == DEAD) { 2694 mutex_unlock(&ep->com.mutex); 2695 c4iw_put_ep(&ep->com); 2696 return -ECONNRESET; 2697 } 2698 set_bit(ULP_REJECT, &ep->com.history); 2699 BUG_ON(ep->com.state != MPA_REQ_RCVD); 2700 if (mpa_rev == 0) 2701 abort_connection(ep, NULL, GFP_KERNEL); 2702 else { 2703 err = send_mpa_reject(ep, pdata, pdata_len); 2704 disconnect = 1; 2705 } 2706 mutex_unlock(&ep->com.mutex); 2707 if (disconnect) 2708 err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 2709 c4iw_put_ep(&ep->com); 2710 return 0; 2711 } 2712 2713 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 2714 { 2715 int err; 2716 struct c4iw_qp_attributes attrs; 2717 enum c4iw_qp_attr_mask mask; 2718 struct c4iw_ep *ep = to_ep(cm_id); 2719 struct c4iw_dev *h = to_c4iw_dev(cm_id->device); 2720 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); 2721 2722 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2723 2724 mutex_lock(&ep->com.mutex); 2725 if (ep->com.state == DEAD) { 2726 err = -ECONNRESET; 2727 goto err; 2728 } 2729 2730 BUG_ON(ep->com.state != MPA_REQ_RCVD); 2731 BUG_ON(!qp); 2732 2733 set_bit(ULP_ACCEPT, &ep->com.history); 2734 if ((conn_param->ord > c4iw_max_read_depth) || 2735 (conn_param->ird > c4iw_max_read_depth)) { 2736 abort_connection(ep, NULL, GFP_KERNEL); 2737 err = -EINVAL; 2738 goto err; 2739 } 2740 2741 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 2742 if (conn_param->ord > ep->ird) { 2743 ep->ird = conn_param->ird; 2744 ep->ord = conn_param->ord; 2745 send_mpa_reject(ep, conn_param->private_data, 2746 conn_param->private_data_len); 2747 abort_connection(ep, NULL, GFP_KERNEL); 2748 err = -ENOMEM; 2749 goto err; 2750 } 2751 if (conn_param->ird > ep->ord) { 2752 if (!ep->ord) 2753 conn_param->ird = 1; 2754 else { 2755 abort_connection(ep, NULL, GFP_KERNEL); 2756 err = -ENOMEM; 2757 goto err; 2758 } 2759 } 2760 2761 } 2762 ep->ird = conn_param->ird; 2763 ep->ord = conn_param->ord; 2764 2765 if 
(ep->mpa_attr.version != 2) 2766 if (peer2peer && ep->ird == 0) 2767 ep->ird = 1; 2768 2769 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); 2770 2771 cm_id->add_ref(cm_id); 2772 ep->com.cm_id = cm_id; 2773 ep->com.qp = qp; 2774 ref_qp(ep); 2775 2776 /* bind QP to EP and move to RTS */ 2777 attrs.mpa_attr = ep->mpa_attr; 2778 attrs.max_ird = ep->ird; 2779 attrs.max_ord = ep->ord; 2780 attrs.llp_stream_handle = ep; 2781 attrs.next_state = C4IW_QP_STATE_RTS; 2782 2783 /* bind QP and TID with INIT_WR */ 2784 mask = C4IW_QP_ATTR_NEXT_STATE | 2785 C4IW_QP_ATTR_LLP_STREAM_HANDLE | 2786 C4IW_QP_ATTR_MPA_ATTR | 2787 C4IW_QP_ATTR_MAX_IRD | 2788 C4IW_QP_ATTR_MAX_ORD; 2789 2790 err = c4iw_modify_qp(ep->com.qp->rhp, 2791 ep->com.qp, mask, &attrs, 1); 2792 if (err) 2793 goto err1; 2794 err = send_mpa_reply(ep, conn_param->private_data, 2795 conn_param->private_data_len); 2796 if (err) 2797 goto err1; 2798 2799 __state_set(&ep->com, FPDU_MODE); 2800 established_upcall(ep); 2801 mutex_unlock(&ep->com.mutex); 2802 c4iw_put_ep(&ep->com); 2803 return 0; 2804 err1: 2805 ep->com.cm_id = NULL; 2806 cm_id->rem_ref(cm_id); 2807 err: 2808 mutex_unlock(&ep->com.mutex); 2809 c4iw_put_ep(&ep->com); 2810 return err; 2811 } 2812 2813 static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) 2814 { 2815 struct in_device *ind; 2816 int found = 0; 2817 struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr; 2818 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr; 2819 2820 ind = in_dev_get(dev->rdev.lldi.ports[0]); 2821 if (!ind) 2822 return -EADDRNOTAVAIL; 2823 for_primary_ifa(ind) { 2824 laddr->sin_addr.s_addr = ifa->ifa_address; 2825 raddr->sin_addr.s_addr = ifa->ifa_address; 2826 found = 1; 2827 break; 2828 } 2829 endfor_ifa(ind); 2830 in_dev_put(ind); 2831 return found ? 
0 : -EADDRNOTAVAIL; 2832 } 2833 2834 static int get_lladdr(struct net_device *dev, struct in6_addr *addr, 2835 unsigned char banned_flags) 2836 { 2837 struct inet6_dev *idev; 2838 int err = -EADDRNOTAVAIL; 2839 2840 rcu_read_lock(); 2841 idev = __in6_dev_get(dev); 2842 if (idev != NULL) { 2843 struct inet6_ifaddr *ifp; 2844 2845 read_lock_bh(&idev->lock); 2846 list_for_each_entry(ifp, &idev->addr_list, if_list) { 2847 if (ifp->scope == IFA_LINK && 2848 !(ifp->flags & banned_flags)) { 2849 memcpy(addr, &ifp->addr, 16); 2850 err = 0; 2851 break; 2852 } 2853 } 2854 read_unlock_bh(&idev->lock); 2855 } 2856 rcu_read_unlock(); 2857 return err; 2858 } 2859 2860 static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) 2861 { 2862 struct in6_addr uninitialized_var(addr); 2863 struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->local_addr; 2864 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->remote_addr; 2865 2866 if (get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) { 2867 memcpy(la6->sin6_addr.s6_addr, &addr, 16); 2868 memcpy(ra6->sin6_addr.s6_addr, &addr, 16); 2869 return 0; 2870 } 2871 return -EADDRNOTAVAIL; 2872 } 2873 2874 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 2875 { 2876 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 2877 struct c4iw_ep *ep; 2878 int err = 0; 2879 struct sockaddr_in *laddr; 2880 struct sockaddr_in *raddr; 2881 struct sockaddr_in6 *laddr6; 2882 struct sockaddr_in6 *raddr6; 2883 struct iwpm_dev_data pm_reg_msg; 2884 struct iwpm_sa_data pm_msg; 2885 __u8 *ra; 2886 int iptype; 2887 int iwpm_err = 0; 2888 2889 if ((conn_param->ord > c4iw_max_read_depth) || 2890 (conn_param->ird > c4iw_max_read_depth)) { 2891 err = -EINVAL; 2892 goto out; 2893 } 2894 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 2895 if (!ep) { 2896 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 2897 err = -ENOMEM; 2898 goto out; 2899 } 2900 init_timer(&ep->timer); 2901 ep->plen = conn_param->private_data_len; 2902 if (ep->plen) 2903 memcpy(ep->mpa_pkt + sizeof(struct mpa_message), 2904 conn_param->private_data, ep->plen); 2905 ep->ird = conn_param->ird; 2906 ep->ord = conn_param->ord; 2907 2908 if (peer2peer && ep->ord == 0) 2909 ep->ord = 1; 2910 2911 cm_id->add_ref(cm_id); 2912 ep->com.dev = dev; 2913 ep->com.cm_id = cm_id; 2914 ep->com.qp = get_qhp(dev, conn_param->qpn); 2915 if (!ep->com.qp) { 2916 PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn); 2917 err = -EINVAL; 2918 goto fail1; 2919 } 2920 ref_qp(ep); 2921 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, 2922 ep->com.qp, cm_id); 2923 2924 /* 2925 * Allocate an active TID to initiate a TCP connection. 
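 * cxgb4_alloc_atid() returns -1 when no aTIDs are available, which is mapped to -ENOMEM below.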
2926 */ 2927 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); 2928 if (ep->atid == -1) { 2929 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); 2930 err = -ENOMEM; 2931 goto fail1; 2932 } 2933 insert_handle(dev, &dev->atid_idr, ep, ep->atid); 2934 2935 memcpy(&ep->com.local_addr, &cm_id->local_addr, 2936 sizeof(ep->com.local_addr)); 2937 memcpy(&ep->com.remote_addr, &cm_id->remote_addr, 2938 sizeof(ep->com.remote_addr)); 2939 2940 /* No port mapper available, go with the specified peer information */ 2941 memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr, 2942 sizeof(ep->com.mapped_local_addr)); 2943 memcpy(&ep->com.mapped_remote_addr, &cm_id->remote_addr, 2944 sizeof(ep->com.mapped_remote_addr)); 2945 2946 c4iw_form_reg_msg(dev, &pm_reg_msg); 2947 iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW); 2948 if (iwpm_err) { 2949 PDBG("%s: Port Mapper reg pid fail (err = %d).\n", 2950 __func__, iwpm_err); 2951 } 2952 if (iwpm_valid_pid() && !iwpm_err) { 2953 c4iw_form_pm_msg(ep, &pm_msg); 2954 iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_C4IW); 2955 if (iwpm_err) 2956 PDBG("%s: Port Mapper query fail (err = %d).\n", 2957 __func__, iwpm_err); 2958 else 2959 c4iw_record_pm_msg(ep, &pm_msg); 2960 } 2961 if (iwpm_create_mapinfo(&ep->com.local_addr, 2962 &ep->com.mapped_local_addr, RDMA_NL_C4IW)) { 2963 iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW); 2964 err = -ENOMEM; 2965 goto fail1; 2966 } 2967 print_addr(&ep->com, __func__, "add_query/create_mapinfo"); 2968 set_bit(RELEASE_MAPINFO, &ep->com.flags); 2969 2970 laddr = (struct sockaddr_in *)&ep->com.mapped_local_addr; 2971 raddr = (struct sockaddr_in *)&ep->com.mapped_remote_addr; 2972 laddr6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr; 2973 raddr6 = (struct sockaddr_in6 *) &ep->com.mapped_remote_addr; 2974 2975 if (cm_id->remote_addr.ss_family == AF_INET) { 2976 iptype = 4; 2977 ra = (__u8 *)&raddr->sin_addr; 2978 2979 /* 2980 * Handle loopback requests to INADDR_ANY. 2981 */ 2982 if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) { 2983 err = pick_local_ipaddrs(dev, cm_id); 2984 if (err) 2985 goto fail1; 2986 } 2987 2988 /* find a route */ 2989 PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n", 2990 __func__, &laddr->sin_addr, ntohs(laddr->sin_port), 2991 ra, ntohs(raddr->sin_port)); 2992 ep->dst = find_route(dev, laddr->sin_addr.s_addr, 2993 raddr->sin_addr.s_addr, laddr->sin_port, 2994 raddr->sin_port, 0); 2995 } else { 2996 iptype = 6; 2997 ra = (__u8 *)&raddr6->sin6_addr; 2998 2999 /* 3000 * Handle loopback requests to INADDR_ANY. 
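 * pick_local_ip6addrs() fills in a link-local address of the first port for both the local and remote side so the connection can be looped back through the adapter.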
3001 */ 3002 if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) { 3003 err = pick_local_ip6addrs(dev, cm_id); 3004 if (err) 3005 goto fail1; 3006 } 3007 3008 /* find a route */ 3009 PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n", 3010 __func__, laddr6->sin6_addr.s6_addr, 3011 ntohs(laddr6->sin6_port), 3012 raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port)); 3013 ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr, 3014 raddr6->sin6_addr.s6_addr, 3015 laddr6->sin6_port, raddr6->sin6_port, 0, 3016 raddr6->sin6_scope_id); 3017 } 3018 if (!ep->dst) { 3019 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); 3020 err = -EHOSTUNREACH; 3021 goto fail2; 3022 } 3023 3024 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true); 3025 if (err) { 3026 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 3027 goto fail3; 3028 } 3029 3030 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 3031 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 3032 ep->l2t->idx); 3033 3034 state_set(&ep->com, CONNECTING); 3035 ep->tos = 0; 3036 3037 /* send connect request to rnic */ 3038 err = send_connect(ep); 3039 if (!err) 3040 goto out; 3041 3042 cxgb4_l2t_release(ep->l2t); 3043 fail3: 3044 dst_release(ep->dst); 3045 fail2: 3046 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 3047 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 3048 fail1: 3049 cm_id->rem_ref(cm_id); 3050 c4iw_put_ep(&ep->com); 3051 out: 3052 return err; 3053 } 3054 3055 static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) 3056 { 3057 int err; 3058 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) 3059 &ep->com.mapped_local_addr; 3060 3061 c4iw_init_wr_wait(&ep->com.wr_wait); 3062 err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0], 3063 ep->stid, &sin6->sin6_addr, 3064 sin6->sin6_port, 3065 ep->com.dev->rdev.lldi.rxq_ids[0]); 3066 if (!err) 3067 err = c4iw_wait_for_reply(&ep->com.dev->rdev, 3068 &ep->com.wr_wait, 3069 0, 0, __func__); 3070 if (err) 3071 pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n", 3072 err, ep->stid, 3073 sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port)); 3074 return err; 3075 } 3076 3077 static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) 3078 { 3079 int err; 3080 struct sockaddr_in *sin = (struct sockaddr_in *) 3081 &ep->com.mapped_local_addr; 3082 3083 if (dev->rdev.lldi.enable_fw_ofld_conn) { 3084 do { 3085 err = cxgb4_create_server_filter( 3086 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3087 sin->sin_addr.s_addr, sin->sin_port, 0, 3088 ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0); 3089 if (err == -EBUSY) { 3090 set_current_state(TASK_UNINTERRUPTIBLE); 3091 schedule_timeout(usecs_to_jiffies(100)); 3092 } 3093 } while (err == -EBUSY); 3094 } else { 3095 c4iw_init_wr_wait(&ep->com.wr_wait); 3096 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], 3097 ep->stid, sin->sin_addr.s_addr, sin->sin_port, 3098 0, ep->com.dev->rdev.lldi.rxq_ids[0]); 3099 if (!err) 3100 err = c4iw_wait_for_reply(&ep->com.dev->rdev, 3101 &ep->com.wr_wait, 3102 0, 0, __func__); 3103 } 3104 if (err) 3105 pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n" 3106 , err, ep->stid, 3107 &sin->sin_addr, ntohs(sin->sin_port)); 3108 return err; 3109 } 3110 3111 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) 3112 { 3113 int err = 0; 3114 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 3115 struct c4iw_listen_ep *ep; 3116 struct iwpm_dev_data 
pm_reg_msg; 3117 struct iwpm_sa_data pm_msg; 3118 int iwpm_err = 0; 3119 3120 might_sleep(); 3121 3122 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 3123 if (!ep) { 3124 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 3125 err = -ENOMEM; 3126 goto fail1; 3127 } 3128 PDBG("%s ep %p\n", __func__, ep); 3129 cm_id->add_ref(cm_id); 3130 ep->com.cm_id = cm_id; 3131 ep->com.dev = dev; 3132 ep->backlog = backlog; 3133 memcpy(&ep->com.local_addr, &cm_id->local_addr, 3134 sizeof(ep->com.local_addr)); 3135 3136 /* 3137 * Allocate a server TID. 3138 */ 3139 if (dev->rdev.lldi.enable_fw_ofld_conn && 3140 ep->com.local_addr.ss_family == AF_INET) 3141 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, 3142 cm_id->local_addr.ss_family, ep); 3143 else 3144 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, 3145 cm_id->local_addr.ss_family, ep); 3146 3147 if (ep->stid == -1) { 3148 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__); 3149 err = -ENOMEM; 3150 goto fail2; 3151 } 3152 insert_handle(dev, &dev->stid_idr, ep, ep->stid); 3153 3154 /* No port mapper available, go with the specified info */ 3155 memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr, 3156 sizeof(ep->com.mapped_local_addr)); 3157 3158 c4iw_form_reg_msg(dev, &pm_reg_msg); 3159 iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW); 3160 if (iwpm_err) { 3161 PDBG("%s: Port Mapper reg pid fail (err = %d).\n", 3162 __func__, iwpm_err); 3163 } 3164 if (iwpm_valid_pid() && !iwpm_err) { 3165 memcpy(&pm_msg.loc_addr, &ep->com.local_addr, 3166 sizeof(ep->com.local_addr)); 3167 iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_C4IW); 3168 if (iwpm_err) 3169 PDBG("%s: Port Mapper query fail (err = %d).\n", 3170 __func__, iwpm_err); 3171 else 3172 memcpy(&ep->com.mapped_local_addr, 3173 &pm_msg.mapped_loc_addr, 3174 sizeof(ep->com.mapped_local_addr)); 3175 } 3176 if (iwpm_create_mapinfo(&ep->com.local_addr, 3177 &ep->com.mapped_local_addr, RDMA_NL_C4IW)) { 3178 err = -ENOMEM; 3179 goto fail3; 3180 } 3181 print_addr(&ep->com, __func__, "add_mapping/create_mapinfo"); 3182 3183 set_bit(RELEASE_MAPINFO, &ep->com.flags); 3184 state_set(&ep->com, LISTEN); 3185 if (ep->com.local_addr.ss_family == AF_INET) 3186 err = create_server4(dev, ep); 3187 else 3188 err = create_server6(dev, ep); 3189 if (!err) { 3190 cm_id->provider_data = ep; 3191 goto out; 3192 } 3193 3194 fail3: 3195 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, 3196 ep->com.local_addr.ss_family); 3197 fail2: 3198 cm_id->rem_ref(cm_id); 3199 c4iw_put_ep(&ep->com); 3200 fail1: 3201 out: 3202 return err; 3203 } 3204 3205 int c4iw_destroy_listen(struct iw_cm_id *cm_id) 3206 { 3207 int err; 3208 struct c4iw_listen_ep *ep = to_listen_ep(cm_id); 3209 3210 PDBG("%s ep %p\n", __func__, ep); 3211 3212 might_sleep(); 3213 state_set(&ep->com, DEAD); 3214 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn && 3215 ep->com.local_addr.ss_family == AF_INET) { 3216 err = cxgb4_remove_server_filter( 3217 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3218 ep->com.dev->rdev.lldi.rxq_ids[0], 0); 3219 } else { 3220 c4iw_init_wr_wait(&ep->com.wr_wait); 3221 err = cxgb4_remove_server( 3222 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3223 ep->com.dev->rdev.lldi.rxq_ids[0], 0); 3224 if (err) 3225 goto done; 3226 err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 3227 0, 0, __func__); 3228 } 3229 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid); 3230 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, 3231 ep->com.local_addr.ss_family); 3232 done: 3233 cm_id->rem_ref(cm_id); 3234 
c4iw_put_ep(&ep->com); 3235 return err; 3236 } 3237 3238 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) 3239 { 3240 int ret = 0; 3241 int close = 0; 3242 int fatal = 0; 3243 struct c4iw_rdev *rdev; 3244 3245 mutex_lock(&ep->com.mutex); 3246 3247 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, 3248 states[ep->com.state], abrupt); 3249 3250 rdev = &ep->com.dev->rdev; 3251 if (c4iw_fatal_error(rdev)) { 3252 fatal = 1; 3253 close_complete_upcall(ep, -EIO); 3254 ep->com.state = DEAD; 3255 } 3256 switch (ep->com.state) { 3257 case MPA_REQ_WAIT: 3258 case MPA_REQ_SENT: 3259 case MPA_REQ_RCVD: 3260 case MPA_REP_SENT: 3261 case FPDU_MODE: 3262 close = 1; 3263 if (abrupt) 3264 ep->com.state = ABORTING; 3265 else { 3266 ep->com.state = CLOSING; 3267 start_ep_timer(ep); 3268 } 3269 set_bit(CLOSE_SENT, &ep->com.flags); 3270 break; 3271 case CLOSING: 3272 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { 3273 close = 1; 3274 if (abrupt) { 3275 (void)stop_ep_timer(ep); 3276 ep->com.state = ABORTING; 3277 } else 3278 ep->com.state = MORIBUND; 3279 } 3280 break; 3281 case MORIBUND: 3282 case ABORTING: 3283 case DEAD: 3284 PDBG("%s ignoring disconnect ep %p state %u\n", 3285 __func__, ep, ep->com.state); 3286 break; 3287 default: 3288 BUG(); 3289 break; 3290 } 3291 3292 if (close) { 3293 if (abrupt) { 3294 set_bit(EP_DISC_ABORT, &ep->com.history); 3295 close_complete_upcall(ep, -ECONNRESET); 3296 ret = send_abort(ep, NULL, gfp); 3297 } else { 3298 set_bit(EP_DISC_CLOSE, &ep->com.history); 3299 ret = send_halfclose(ep, gfp); 3300 } 3301 if (ret) 3302 fatal = 1; 3303 } 3304 mutex_unlock(&ep->com.mutex); 3305 if (fatal) 3306 release_ep_resources(ep); 3307 return ret; 3308 } 3309 3310 static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 3311 struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 3312 { 3313 struct c4iw_ep *ep; 3314 int atid = be32_to_cpu(req->tid); 3315 3316 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, 3317 (__force u32) req->tid); 3318 if (!ep) 3319 return; 3320 3321 switch (req->retval) { 3322 case FW_ENOMEM: 3323 set_bit(ACT_RETRY_NOMEM, &ep->com.history); 3324 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 3325 send_fw_act_open_req(ep, atid); 3326 return; 3327 } 3328 case FW_EADDRINUSE: 3329 set_bit(ACT_RETRY_INUSE, &ep->com.history); 3330 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 3331 send_fw_act_open_req(ep, atid); 3332 return; 3333 } 3334 break; 3335 default: 3336 pr_info("%s unexpected ofld conn wr retval %d\n", 3337 __func__, req->retval); 3338 break; 3339 } 3340 pr_err("active ofld_connect_wr failure %d atid %d\n", 3341 req->retval, atid); 3342 mutex_lock(&dev->rdev.stats.lock); 3343 dev->rdev.stats.act_ofld_conn_fails++; 3344 mutex_unlock(&dev->rdev.stats.lock); 3345 connect_reply_upcall(ep, status2errno(req->retval)); 3346 state_set(&ep->com, DEAD); 3347 remove_handle(dev, &dev->atid_idr, atid); 3348 cxgb4_free_atid(dev->rdev.lldi.tids, atid); 3349 dst_release(ep->dst); 3350 cxgb4_l2t_release(ep->l2t); 3351 c4iw_put_ep(&ep->com); 3352 } 3353 3354 static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 3355 struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 3356 { 3357 struct sk_buff *rpl_skb; 3358 struct cpl_pass_accept_req *cpl; 3359 int ret; 3360 3361 rpl_skb = (struct sk_buff *)(unsigned long)req->cookie; 3362 BUG_ON(!rpl_skb); 3363 if (req->retval) { 3364 PDBG("%s passive open failure %d\n", __func__, req->retval); 3365 mutex_lock(&dev->rdev.stats.lock); 3366 dev->rdev.stats.pas_ofld_conn_fails++; 
3367 mutex_unlock(&dev->rdev.stats.lock); 3368 kfree_skb(rpl_skb); 3369 } else { 3370 cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb); 3371 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 3372 (__force u32) htonl( 3373 (__force u32) req->tid))); 3374 ret = pass_accept_req(dev, rpl_skb); 3375 if (!ret) 3376 kfree_skb(rpl_skb); 3377 } 3378 return; 3379 } 3380 3381 static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) 3382 { 3383 struct cpl_fw6_msg *rpl = cplhdr(skb); 3384 struct cpl_fw6_msg_ofld_connection_wr_rpl *req; 3385 3386 switch (rpl->type) { 3387 case FW6_TYPE_CQE: 3388 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); 3389 break; 3390 case FW6_TYPE_OFLD_CONNECTION_WR_RPL: 3391 req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data; 3392 switch (req->t_state) { 3393 case TCP_SYN_SENT: 3394 active_ofld_conn_reply(dev, skb, req); 3395 break; 3396 case TCP_SYN_RECV: 3397 passive_ofld_conn_reply(dev, skb, req); 3398 break; 3399 default: 3400 pr_err("%s unexpected ofld conn wr state %d\n", 3401 __func__, req->t_state); 3402 break; 3403 } 3404 break; 3405 } 3406 return 0; 3407 } 3408 3409 static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos) 3410 { 3411 u32 l2info; 3412 u16 vlantag, len, hdr_len, eth_hdr_len; 3413 u8 intf; 3414 struct cpl_rx_pkt *cpl = cplhdr(skb); 3415 struct cpl_pass_accept_req *req; 3416 struct tcp_options_received tmp_opt; 3417 struct c4iw_dev *dev; 3418 3419 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); 3420 /* Store values from cpl_rx_pkt in temporary location. */ 3421 vlantag = (__force u16) cpl->vlan; 3422 len = (__force u16) cpl->len; 3423 l2info = (__force u32) cpl->l2info; 3424 hdr_len = (__force u16) cpl->hdr_len; 3425 intf = cpl->iff; 3426 3427 __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header)); 3428 3429 /* 3430 * We need to parse the TCP options from the SYN packet 3431 * to generate the cpl_pass_accept_req. 3432 */ 3433 memset(&tmp_opt, 0, sizeof(tmp_opt)); 3434 tcp_clear_options(&tmp_opt); 3435 tcp_parse_options(skb, &tmp_opt, 0, NULL); 3436 3437 req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req)); 3438 memset(req, 0, sizeof(*req)); 3439 req->l2info = cpu_to_be16(V_SYN_INTF(intf) | 3440 V_SYN_MAC_IDX(G_RX_MACIDX( 3441 (__force int) htonl(l2info))) | 3442 F_SYN_XACT_MATCH); 3443 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
3444 G_RX_ETHHDR_LEN((__force int) htonl(l2info)) : 3445 G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info)); 3446 req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN( 3447 (__force int) htonl(l2info))) | 3448 V_TCP_HDR_LEN(G_RX_TCPHDR_LEN( 3449 (__force int) htons(hdr_len))) | 3450 V_IP_HDR_LEN(G_RX_IPHDR_LEN( 3451 (__force int) htons(hdr_len))) | 3452 V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len))); 3453 req->vlan = (__force __be16) vlantag; 3454 req->len = (__force __be16) len; 3455 req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) | 3456 PASS_OPEN_TOS(tos)); 3457 req->tcpopt.mss = htons(tmp_opt.mss_clamp); 3458 if (tmp_opt.wscale_ok) 3459 req->tcpopt.wsf = tmp_opt.snd_wscale; 3460 req->tcpopt.tstamp = tmp_opt.saw_tstamp; 3461 if (tmp_opt.sack_ok) 3462 req->tcpopt.sack = 1; 3463 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0)); 3464 return; 3465 } 3466 3467 static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, 3468 __be32 laddr, __be16 lport, 3469 __be32 raddr, __be16 rport, 3470 u32 rcv_isn, u32 filter, u16 window, 3471 u32 rss_qid, u8 port_id) 3472 { 3473 struct sk_buff *req_skb; 3474 struct fw_ofld_connection_wr *req; 3475 struct cpl_pass_accept_req *cpl = cplhdr(skb); 3476 int ret; 3477 3478 req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL); 3479 req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req)); 3480 memset(req, 0, sizeof(*req)); 3481 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1)); 3482 req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); 3483 req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL); 3484 req->le.filter = (__force __be32) filter; 3485 req->le.lport = lport; 3486 req->le.pport = rport; 3487 req->le.u.ipv4.lip = laddr; 3488 req->le.u.ipv4.pip = raddr; 3489 req->tcb.rcv_nxt = htonl(rcv_isn + 1); 3490 req->tcb.rcv_adv = htons(window); 3491 req->tcb.t_state_to_astid = 3492 htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) | 3493 V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) | 3494 V_FW_OFLD_CONNECTION_WR_ASTID( 3495 GET_PASS_OPEN_TID(ntohl(cpl->tos_stid)))); 3496 3497 /* 3498 * We store the qid in opt2 which will be used by the firmware 3499 * to send us the wr response. 3500 */ 3501 req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid)); 3502 3503 /* 3504 * We initialize the MSS index in TCB to 0xF. 3505 * So that when driver sends cpl_pass_accept_rpl 3506 * TCB picks up the correct value. If this was 0 3507 * TP will ignore any value > 0 for MSS index. 3508 */ 3509 req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF)); 3510 req->cookie = (unsigned long)skb; 3511 3512 set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id); 3513 ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb); 3514 if (ret < 0) { 3515 pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__, 3516 ret); 3517 kfree_skb(skb); 3518 kfree_skb(req_skb); 3519 } 3520 } 3521 3522 /* 3523 * Handler for CPL_RX_PKT message. Need to handle cpl_rx_pkt 3524 * messages when a filter is being used instead of server to 3525 * redirect a syn packet. When packets hit filter they are redirected 3526 * to the offload queue and driver tries to establish the connection 3527 * using firmware work request. 
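 * Only TCP SYNs that carry the filter-hit indication are processed here; everything else is dropped.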
3528 */ 3529 static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) 3530 { 3531 int stid; 3532 unsigned int filter; 3533 struct ethhdr *eh = NULL; 3534 struct vlan_ethhdr *vlan_eh = NULL; 3535 struct iphdr *iph; 3536 struct tcphdr *tcph; 3537 struct rss_header *rss = (void *)skb->data; 3538 struct cpl_rx_pkt *cpl = (void *)skb->data; 3539 struct cpl_pass_accept_req *req = (void *)(rss + 1); 3540 struct l2t_entry *e; 3541 struct dst_entry *dst; 3542 struct c4iw_ep *lep; 3543 u16 window; 3544 struct port_info *pi; 3545 struct net_device *pdev; 3546 u16 rss_qid, eth_hdr_len; 3547 int step; 3548 u32 tx_chan; 3549 struct neighbour *neigh; 3550 3551 /* Drop all non-SYN packets */ 3552 if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN))) 3553 goto reject; 3554 3555 /* 3556 * Drop all packets which did not hit the filter. 3557 * Unlikely to happen. 3558 */ 3559 if (!(rss->filter_hit && rss->filter_tid)) 3560 goto reject; 3561 3562 /* 3563 * Calculate the server tid from the filter hit index in cpl_rx_pkt. 3564 */ 3565 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val); 3566 3567 lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid); 3568 if (!lep) { 3569 PDBG("%s connect request on invalid stid %d\n", __func__, stid); 3570 goto reject; 3571 } 3572 3573 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ? 3574 G_RX_ETHHDR_LEN(htonl(cpl->l2info)) : 3575 G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info)); 3576 if (eth_hdr_len == ETH_HLEN) { 3577 eh = (struct ethhdr *)(req + 1); 3578 iph = (struct iphdr *)(eh + 1); 3579 } else { 3580 vlan_eh = (struct vlan_ethhdr *)(req + 1); 3581 iph = (struct iphdr *)(vlan_eh + 1); 3582 skb->vlan_tci = ntohs(cpl->vlan); 3583 } 3584 3585 if (iph->version != 0x4) 3586 goto reject; 3587 3588 tcph = (struct tcphdr *)(iph + 1); 3589 skb_set_network_header(skb, (void *)iph - (void *)rss); 3590 skb_set_transport_header(skb, (void *)tcph - (void *)rss); 3591 skb_get(skb); 3592 3593 PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__, 3594 ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr), 3595 ntohs(tcph->source), iph->tos); 3596 3597 dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source, 3598 iph->tos); 3599 if (!dst) { 3600 pr_err("%s - failed to find dst entry!\n", 3601 __func__); 3602 goto reject; 3603 } 3604 neigh = dst_neigh_lookup_skb(dst, skb); 3605 3606 if (!neigh) { 3607 pr_err("%s - failed to allocate neigh!\n", 3608 __func__); 3609 goto free_dst; 3610 } 3611 3612 if (neigh->dev->flags & IFF_LOOPBACK) { 3613 pdev = ip_dev_find(&init_net, iph->daddr); 3614 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, 3615 pdev, 0); 3616 pi = (struct port_info *)netdev_priv(pdev); 3617 tx_chan = cxgb4_port_chan(pdev); 3618 dev_put(pdev); 3619 } else { 3620 pdev = get_real_dev(neigh->dev); 3621 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, 3622 pdev, 0); 3623 pi = (struct port_info *)netdev_priv(pdev); 3624 tx_chan = cxgb4_port_chan(pdev); 3625 } 3626 neigh_release(neigh); 3627 if (!e) { 3628 pr_err("%s - failed to allocate l2t entry!\n", 3629 __func__); 3630 goto free_dst; 3631 } 3632 3633 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; 3634 rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step]; 3635 window = (__force u16) htons((__force u16)tcph->window); 3636 3637 /* Calculate filter portion for LE region. */ 3638 filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple( 3639 dev->rdev.lldi.ports[0], 3640 e)); 3641 3642 /* 3643 * Synthesize the cpl_pass_accept_req. We have everything except the 3644 * TID.
Once firmware sends a reply with TID we update the TID field 3645 * in cpl and pass it through the regular cpl_pass_accept_req path. 3646 */ 3647 build_cpl_pass_accept_req(skb, stid, iph->tos); 3648 send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr, 3649 tcph->source, ntohl(tcph->seq), filter, window, 3650 rss_qid, pi->port_id); 3651 cxgb4_l2t_release(e); 3652 free_dst: 3653 dst_release(dst); 3654 reject: 3655 return 0; 3656 } 3657 3658 /* 3659 * These are the real handlers that are called from a 3660 * work queue. 3661 */ 3662 static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = { 3663 [CPL_ACT_ESTABLISH] = act_establish, 3664 [CPL_ACT_OPEN_RPL] = act_open_rpl, 3665 [CPL_RX_DATA] = rx_data, 3666 [CPL_ABORT_RPL_RSS] = abort_rpl, 3667 [CPL_ABORT_RPL] = abort_rpl, 3668 [CPL_PASS_OPEN_RPL] = pass_open_rpl, 3669 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl, 3670 [CPL_PASS_ACCEPT_REQ] = pass_accept_req, 3671 [CPL_PASS_ESTABLISH] = pass_establish, 3672 [CPL_PEER_CLOSE] = peer_close, 3673 [CPL_ABORT_REQ_RSS] = peer_abort, 3674 [CPL_CLOSE_CON_RPL] = close_con_rpl, 3675 [CPL_RDMA_TERMINATE] = terminate, 3676 [CPL_FW4_ACK] = fw4_ack, 3677 [CPL_FW6_MSG] = deferred_fw6_msg, 3678 [CPL_RX_PKT] = rx_pkt 3679 }; 3680 3681 static void process_timeout(struct c4iw_ep *ep) 3682 { 3683 struct c4iw_qp_attributes attrs; 3684 int abort = 1; 3685 3686 mutex_lock(&ep->com.mutex); 3687 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, 3688 ep->com.state); 3689 set_bit(TIMEDOUT, &ep->com.history); 3690 switch (ep->com.state) { 3691 case MPA_REQ_SENT: 3692 __state_set(&ep->com, ABORTING); 3693 connect_reply_upcall(ep, -ETIMEDOUT); 3694 break; 3695 case MPA_REQ_WAIT: 3696 __state_set(&ep->com, ABORTING); 3697 break; 3698 case CLOSING: 3699 case MORIBUND: 3700 if (ep->com.cm_id && ep->com.qp) { 3701 attrs.next_state = C4IW_QP_STATE_ERROR; 3702 c4iw_modify_qp(ep->com.qp->rhp, 3703 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 3704 &attrs, 1); 3705 } 3706 __state_set(&ep->com, ABORTING); 3707 close_complete_upcall(ep, -ETIMEDOUT); 3708 break; 3709 case ABORTING: 3710 case DEAD: 3711 3712 /* 3713 * These states are expected if the ep timed out at the same 3714 * time as another thread was calling stop_ep_timer(). 3715 * So we silently do nothing for these states. 
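 * (abort is cleared below, so no abort_connection() is issued.)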
3716 */ 3717 abort = 0; 3718 break; 3719 default: 3720 WARN(1, "%s unexpected state ep %p tid %u state %u\n", 3721 __func__, ep, ep->hwtid, ep->com.state); 3722 abort = 0; 3723 } 3724 if (abort) 3725 abort_connection(ep, NULL, GFP_KERNEL); 3726 mutex_unlock(&ep->com.mutex); 3727 c4iw_put_ep(&ep->com); 3728 } 3729 3730 static void process_timedout_eps(void) 3731 { 3732 struct c4iw_ep *ep; 3733 3734 spin_lock_irq(&timeout_lock); 3735 while (!list_empty(&timeout_list)) { 3736 struct list_head *tmp; 3737 3738 tmp = timeout_list.next; 3739 list_del(tmp); 3740 tmp->next = NULL; 3741 tmp->prev = NULL; 3742 spin_unlock_irq(&timeout_lock); 3743 ep = list_entry(tmp, struct c4iw_ep, entry); 3744 process_timeout(ep); 3745 spin_lock_irq(&timeout_lock); 3746 } 3747 spin_unlock_irq(&timeout_lock); 3748 } 3749 3750 static void process_work(struct work_struct *work) 3751 { 3752 struct sk_buff *skb = NULL; 3753 struct c4iw_dev *dev; 3754 struct cpl_act_establish *rpl; 3755 unsigned int opcode; 3756 int ret; 3757 3758 process_timedout_eps(); 3759 while ((skb = skb_dequeue(&rxq))) { 3760 rpl = cplhdr(skb); 3761 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); 3762 opcode = rpl->ot.opcode; 3763 3764 BUG_ON(!work_handlers[opcode]); 3765 ret = work_handlers[opcode](dev, skb); 3766 if (!ret) 3767 kfree_skb(skb); 3768 process_timedout_eps(); 3769 } 3770 } 3771 3772 static DECLARE_WORK(skb_work, process_work); 3773 3774 static void ep_timeout(unsigned long arg) 3775 { 3776 struct c4iw_ep *ep = (struct c4iw_ep *)arg; 3777 int kickit = 0; 3778 3779 spin_lock(&timeout_lock); 3780 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { 3781 /* 3782 * Only insert if it is not already on the list. 3783 */ 3784 if (!ep->entry.next) { 3785 list_add_tail(&ep->entry, &timeout_list); 3786 kickit = 1; 3787 } 3788 } 3789 spin_unlock(&timeout_lock); 3790 if (kickit) 3791 queue_work(workq, &skb_work); 3792 } 3793 3794 /* 3795 * All the CM events are handled on a work queue to have a safe context. 3796 */ 3797 static int sched(struct c4iw_dev *dev, struct sk_buff *skb) 3798 { 3799 3800 /* 3801 * Save dev in the skb->cb area. 3802 */ 3803 *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev; 3804 3805 /* 3806 * Queue the skb and schedule the worker thread. 3807 */ 3808 skb_queue_tail(&rxq, skb); 3809 queue_work(workq, &skb_work); 3810 return 0; 3811 } 3812 3813 static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 3814 { 3815 struct cpl_set_tcb_rpl *rpl = cplhdr(skb); 3816 3817 if (rpl->status != CPL_ERR_NONE) { 3818 printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u " 3819 "for tid %u\n", rpl->status, GET_TID(rpl)); 3820 } 3821 kfree_skb(skb); 3822 return 0; 3823 } 3824 3825 static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) 3826 { 3827 struct cpl_fw6_msg *rpl = cplhdr(skb); 3828 struct c4iw_wr_wait *wr_waitp; 3829 int ret; 3830 3831 PDBG("%s type %u\n", __func__, rpl->type); 3832 3833 switch (rpl->type) { 3834 case FW6_TYPE_WR_RPL: 3835 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); 3836 wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; 3837 PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); 3838 if (wr_waitp) 3839 c4iw_wake_up(wr_waitp, ret ? 
-ret : 0); 3840 kfree_skb(skb); 3841 break; 3842 case FW6_TYPE_CQE: 3843 case FW6_TYPE_OFLD_CONNECTION_WR_RPL: 3844 sched(dev, skb); 3845 break; 3846 default: 3847 printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__, 3848 rpl->type); 3849 kfree_skb(skb); 3850 break; 3851 } 3852 return 0; 3853 } 3854 3855 static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) 3856 { 3857 struct cpl_abort_req_rss *req = cplhdr(skb); 3858 struct c4iw_ep *ep; 3859 struct tid_info *t = dev->rdev.lldi.tids; 3860 unsigned int tid = GET_TID(req); 3861 3862 ep = lookup_tid(t, tid); 3863 if (!ep) { 3864 printk(KERN_WARNING MOD 3865 "Abort on non-existent endpoint, tid %d\n", tid); 3866 kfree_skb(skb); 3867 return 0; 3868 } 3869 if (is_neg_adv(req->status)) { 3870 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, 3871 ep->hwtid); 3872 kfree_skb(skb); 3873 return 0; 3874 } 3875 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, 3876 ep->com.state); 3877 3878 /* 3879 * Wake up any threads in rdma_init() or rdma_fini(). 3880 * However, if we are on MPAv2 and want to retry with MPAv1 3881 * then, don't wake up yet. 3882 */ 3883 if (mpa_rev == 2 && !ep->tried_with_mpa_v1) { 3884 if (ep->com.state != MPA_REQ_SENT) 3885 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 3886 } else 3887 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 3888 sched(dev, skb); 3889 return 0; 3890 } 3891 3892 /* 3893 * Most upcalls from the T4 Core go to sched() to 3894 * schedule the processing on a work queue. 3895 */ 3896 c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = { 3897 [CPL_ACT_ESTABLISH] = sched, 3898 [CPL_ACT_OPEN_RPL] = sched, 3899 [CPL_RX_DATA] = sched, 3900 [CPL_ABORT_RPL_RSS] = sched, 3901 [CPL_ABORT_RPL] = sched, 3902 [CPL_PASS_OPEN_RPL] = sched, 3903 [CPL_CLOSE_LISTSRV_RPL] = sched, 3904 [CPL_PASS_ACCEPT_REQ] = sched, 3905 [CPL_PASS_ESTABLISH] = sched, 3906 [CPL_PEER_CLOSE] = sched, 3907 [CPL_CLOSE_CON_RPL] = sched, 3908 [CPL_ABORT_REQ_RSS] = peer_abort_intr, 3909 [CPL_RDMA_TERMINATE] = sched, 3910 [CPL_FW4_ACK] = sched, 3911 [CPL_SET_TCB_RPL] = set_tcb_rpl, 3912 [CPL_FW6_MSG] = fw6_msg, 3913 [CPL_RX_PKT] = sched 3914 }; 3915 3916 int __init c4iw_cm_init(void) 3917 { 3918 spin_lock_init(&timeout_lock); 3919 skb_queue_head_init(&rxq); 3920 3921 workq = create_singlethread_workqueue("iw_cxgb4"); 3922 if (!workq) 3923 return -ENOMEM; 3924 3925 return 0; 3926 } 3927 3928 void c4iw_cm_term(void) 3929 { 3930 WARN_ON(!list_empty(&timeout_list)); 3931 flush_workqueue(workq); 3932 destroy_workqueue(workq); 3933 } 3934