1 /* 2 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 #include <linux/module.h> 33 #include <linux/list.h> 34 #include <linux/workqueue.h> 35 #include <linux/skbuff.h> 36 #include <linux/timer.h> 37 #include <linux/notifier.h> 38 #include <linux/inetdevice.h> 39 #include <linux/ip.h> 40 #include <linux/tcp.h> 41 #include <linux/if_vlan.h> 42 43 #include <net/neighbour.h> 44 #include <net/netevent.h> 45 #include <net/route.h> 46 #include <net/tcp.h> 47 #include <net/ip6_route.h> 48 #include <net/addrconf.h> 49 50 #include <rdma/ib_addr.h> 51 52 #include "iw_cxgb4.h" 53 54 static char *states[] = { 55 "idle", 56 "listen", 57 "connecting", 58 "mpa_wait_req", 59 "mpa_req_sent", 60 "mpa_req_rcvd", 61 "mpa_rep_sent", 62 "fpdu_mode", 63 "aborting", 64 "closing", 65 "moribund", 66 "dead", 67 NULL, 68 }; 69 70 static int nocong; 71 module_param(nocong, int, 0644); 72 MODULE_PARM_DESC(nocong, "Turn of congestion control (default=0)"); 73 74 static int enable_ecn; 75 module_param(enable_ecn, int, 0644); 76 MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)"); 77 78 static int dack_mode = 1; 79 module_param(dack_mode, int, 0644); 80 MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)"); 81 82 uint c4iw_max_read_depth = 32; 83 module_param(c4iw_max_read_depth, int, 0644); 84 MODULE_PARM_DESC(c4iw_max_read_depth, 85 "Per-connection max ORD/IRD (default=32)"); 86 87 static int enable_tcp_timestamps; 88 module_param(enable_tcp_timestamps, int, 0644); 89 MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)"); 90 91 static int enable_tcp_sack; 92 module_param(enable_tcp_sack, int, 0644); 93 MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)"); 94 95 static int enable_tcp_window_scaling = 1; 96 module_param(enable_tcp_window_scaling, int, 0644); 97 MODULE_PARM_DESC(enable_tcp_window_scaling, 98 "Enable tcp window scaling (default=1)"); 99 100 int c4iw_debug; 101 module_param(c4iw_debug, int, 0644); 102 MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)"); 103 104 static int peer2peer = 1; 105 module_param(peer2peer, int, 0644); 106 MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)"); 107 108 static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; 109 module_param(p2p_type, int, 0644); 110 MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: " 111 "1=RDMA_READ 0=RDMA_WRITE (default 1)"); 112 113 static int ep_timeout_secs = 60; 114 module_param(ep_timeout_secs, int, 0644); 115 MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout " 116 "in seconds (default=60)"); 117 118 static int mpa_rev = 1; 119 module_param(mpa_rev, int, 0644); 120 MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, " 121 "1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft" 122 " compliant (default=1)"); 123 124 static int markers_enabled; 125 module_param(markers_enabled, int, 0644); 126 MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)"); 127 128 static int crc_enabled = 1; 129 module_param(crc_enabled, int, 0644); 130 MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)"); 131 132 static int rcv_win = 256 * 1024; 133 module_param(rcv_win, int, 0644); 134 MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)"); 135 136 static int snd_win = 128 * 1024; 137 module_param(snd_win, int, 0644); 138 MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)"); 139 140 static struct workqueue_struct *workq; 141 142 static struct sk_buff_head rxq; 143 144 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp); 145 static void ep_timeout(unsigned long arg); 146 static void connect_reply_upcall(struct c4iw_ep *ep, int status); 147 148 static LIST_HEAD(timeout_list); 149 static spinlock_t timeout_lock; 150 151 static void deref_qp(struct c4iw_ep *ep) 152 { 153 c4iw_qp_rem_ref(&ep->com.qp->ibqp); 154 clear_bit(QP_REFERENCED, &ep->com.flags); 155 } 156 157 static void ref_qp(struct c4iw_ep *ep) 158 { 159 set_bit(QP_REFERENCED, &ep->com.flags); 160 c4iw_qp_add_ref(&ep->com.qp->ibqp); 161 } 162 163 static void start_ep_timer(struct c4iw_ep *ep) 164 { 165 PDBG("%s ep %p\n", __func__, ep); 166 if (timer_pending(&ep->timer)) { 167 pr_err("%s timer already started! ep %p\n", 168 __func__, ep); 169 return; 170 } 171 clear_bit(TIMEOUT, &ep->com.flags); 172 c4iw_get_ep(&ep->com); 173 ep->timer.expires = jiffies + ep_timeout_secs * HZ; 174 ep->timer.data = (unsigned long)ep; 175 ep->timer.function = ep_timeout; 176 add_timer(&ep->timer); 177 } 178 179 static int stop_ep_timer(struct c4iw_ep *ep) 180 { 181 PDBG("%s ep %p stopping\n", __func__, ep); 182 del_timer_sync(&ep->timer); 183 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { 184 c4iw_put_ep(&ep->com); 185 return 0; 186 } 187 return 1; 188 } 189 190 static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb, 191 struct l2t_entry *l2e) 192 { 193 int error = 0; 194 195 if (c4iw_fatal_error(rdev)) { 196 kfree_skb(skb); 197 PDBG("%s - device in error state - dropping\n", __func__); 198 return -EIO; 199 } 200 error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e); 201 if (error < 0) 202 kfree_skb(skb); 203 return error < 0 ? error : 0; 204 } 205 206 int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb) 207 { 208 int error = 0; 209 210 if (c4iw_fatal_error(rdev)) { 211 kfree_skb(skb); 212 PDBG("%s - device in error state - dropping\n", __func__); 213 return -EIO; 214 } 215 error = cxgb4_ofld_send(rdev->lldi.ports[0], skb); 216 if (error < 0) 217 kfree_skb(skb); 218 return error < 0 ? error : 0; 219 } 220 221 static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb) 222 { 223 struct cpl_tid_release *req; 224 225 skb = get_skb(skb, sizeof *req, GFP_KERNEL); 226 if (!skb) 227 return; 228 req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req)); 229 INIT_TP_WR(req, hwtid); 230 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); 231 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); 232 c4iw_ofld_send(rdev, skb); 233 return; 234 } 235 236 static void set_emss(struct c4iw_ep *ep, u16 opt) 237 { 238 ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 239 ((AF_INET == ep->com.remote_addr.ss_family) ? 240 sizeof(struct iphdr) : sizeof(struct ipv6hdr)) - 241 sizeof(struct tcphdr); 242 ep->mss = ep->emss; 243 if (GET_TCPOPT_TSTAMP(opt)) 244 ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4); 245 if (ep->emss < 128) 246 ep->emss = 128; 247 if (ep->emss & 7) 248 PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n", 249 GET_TCPOPT_MSS(opt), ep->mss, ep->emss); 250 PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt), 251 ep->mss, ep->emss); 252 } 253 254 static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc) 255 { 256 enum c4iw_ep_state state; 257 258 mutex_lock(&epc->mutex); 259 state = epc->state; 260 mutex_unlock(&epc->mutex); 261 return state; 262 } 263 264 static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) 265 { 266 epc->state = new; 267 } 268 269 static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) 270 { 271 mutex_lock(&epc->mutex); 272 PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]); 273 __state_set(epc, new); 274 mutex_unlock(&epc->mutex); 275 return; 276 } 277 278 static void *alloc_ep(int size, gfp_t gfp) 279 { 280 struct c4iw_ep_common *epc; 281 282 epc = kzalloc(size, gfp); 283 if (epc) { 284 kref_init(&epc->kref); 285 mutex_init(&epc->mutex); 286 c4iw_init_wr_wait(&epc->wr_wait); 287 } 288 PDBG("%s alloc ep %p\n", __func__, epc); 289 return epc; 290 } 291 292 void _c4iw_free_ep(struct kref *kref) 293 { 294 struct c4iw_ep *ep; 295 296 ep = container_of(kref, struct c4iw_ep, com.kref); 297 PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]); 298 if (test_bit(QP_REFERENCED, &ep->com.flags)) 299 deref_qp(ep); 300 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { 301 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid); 302 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); 303 dst_release(ep->dst); 304 cxgb4_l2t_release(ep->l2t); 305 } 306 if (test_bit(RELEASE_MAPINFO, &ep->com.flags)) { 307 print_addr(&ep->com, __func__, "remove_mapinfo/mapping"); 308 iwpm_remove_mapinfo(&ep->com.local_addr, 309 &ep->com.mapped_local_addr); 310 iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW); 311 } 312 kfree(ep); 313 } 314 315 static void release_ep_resources(struct c4iw_ep *ep) 316 { 317 set_bit(RELEASE_RESOURCES, &ep->com.flags); 318 c4iw_put_ep(&ep->com); 319 } 320 321 static int status2errno(int status) 322 { 323 switch (status) { 324 case CPL_ERR_NONE: 325 return 0; 326 case CPL_ERR_CONN_RESET: 327 return -ECONNRESET; 328 case CPL_ERR_ARP_MISS: 329 return -EHOSTUNREACH; 330 case CPL_ERR_CONN_TIMEDOUT: 331 return -ETIMEDOUT; 332 case CPL_ERR_TCAM_FULL: 333 return -ENOMEM; 334 case CPL_ERR_CONN_EXIST: 335 return -EADDRINUSE; 336 default: 337 return -EIO; 338 } 339 } 340 341 /* 342 * Try and reuse skbs already allocated... 343 */ 344 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp) 345 { 346 if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) { 347 skb_trim(skb, 0); 348 skb_get(skb); 349 skb_reset_transport_header(skb); 350 } else { 351 skb = alloc_skb(len, gfp); 352 } 353 t4_set_arp_err_handler(skb, NULL, NULL); 354 return skb; 355 } 356 357 static struct net_device *get_real_dev(struct net_device *egress_dev) 358 { 359 return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev; 360 } 361 362 static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev) 363 { 364 int i; 365 366 egress_dev = get_real_dev(egress_dev); 367 for (i = 0; i < dev->rdev.lldi.nports; i++) 368 if (dev->rdev.lldi.ports[i] == egress_dev) 369 return 1; 370 return 0; 371 } 372 373 static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip, 374 __u8 *peer_ip, __be16 local_port, 375 __be16 peer_port, u8 tos, 376 __u32 sin6_scope_id) 377 { 378 struct dst_entry *dst = NULL; 379 380 if (IS_ENABLED(CONFIG_IPV6)) { 381 struct flowi6 fl6; 382 383 memset(&fl6, 0, sizeof(fl6)); 384 memcpy(&fl6.daddr, peer_ip, 16); 385 memcpy(&fl6.saddr, local_ip, 16); 386 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) 387 fl6.flowi6_oif = sin6_scope_id; 388 dst = ip6_route_output(&init_net, NULL, &fl6); 389 if (!dst) 390 goto out; 391 if (!our_interface(dev, ip6_dst_idev(dst)->dev) && 392 !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) { 393 dst_release(dst); 394 dst = NULL; 395 } 396 } 397 398 out: 399 return dst; 400 } 401 402 static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip, 403 __be32 peer_ip, __be16 local_port, 404 __be16 peer_port, u8 tos) 405 { 406 struct rtable *rt; 407 struct flowi4 fl4; 408 struct neighbour *n; 409 410 rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip, 411 peer_port, local_port, IPPROTO_TCP, 412 tos, 0); 413 if (IS_ERR(rt)) 414 return NULL; 415 n = dst_neigh_lookup(&rt->dst, &peer_ip); 416 if (!n) 417 return NULL; 418 if (!our_interface(dev, n->dev) && 419 !(n->dev->flags & IFF_LOOPBACK)) { 420 neigh_release(n); 421 dst_release(&rt->dst); 422 return NULL; 423 } 424 neigh_release(n); 425 return &rt->dst; 426 } 427 428 static void arp_failure_discard(void *handle, struct sk_buff *skb) 429 { 430 PDBG("%s c4iw_dev %p\n", __func__, handle); 431 kfree_skb(skb); 432 } 433 434 /* 435 * Handle an ARP failure for an active open. 436 */ 437 static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) 438 { 439 struct c4iw_ep *ep = handle; 440 441 printk(KERN_ERR MOD "ARP failure duing connect\n"); 442 kfree_skb(skb); 443 connect_reply_upcall(ep, -EHOSTUNREACH); 444 state_set(&ep->com, DEAD); 445 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 446 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 447 dst_release(ep->dst); 448 cxgb4_l2t_release(ep->l2t); 449 c4iw_put_ep(&ep->com); 450 } 451 452 /* 453 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant 454 * and send it along. 455 */ 456 static void abort_arp_failure(void *handle, struct sk_buff *skb) 457 { 458 struct c4iw_rdev *rdev = handle; 459 struct cpl_abort_req *req = cplhdr(skb); 460 461 PDBG("%s rdev %p\n", __func__, rdev); 462 req->cmd = CPL_ABORT_NO_RST; 463 c4iw_ofld_send(rdev, skb); 464 } 465 466 static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) 467 { 468 unsigned int flowclen = 80; 469 struct fw_flowc_wr *flowc; 470 int i; 471 472 skb = get_skb(skb, flowclen, GFP_KERNEL); 473 flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen); 474 475 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | 476 FW_FLOWC_WR_NPARAMS_V(8)); 477 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen, 478 16)) | FW_WR_FLOWID_V(ep->hwtid)); 479 480 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 481 flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V 482 (ep->com.dev->rdev.lldi.pf)); 483 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 484 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); 485 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 486 flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan); 487 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 488 flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid); 489 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; 490 flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq); 491 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; 492 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq); 493 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; 494 flowc->mnemval[6].val = cpu_to_be32(ep->snd_win); 495 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; 496 flowc->mnemval[7].val = cpu_to_be32(ep->emss); 497 /* Pad WR to 16 byte boundary */ 498 flowc->mnemval[8].mnemonic = 0; 499 flowc->mnemval[8].val = 0; 500 for (i = 0; i < 9; i++) { 501 flowc->mnemval[i].r4[0] = 0; 502 flowc->mnemval[i].r4[1] = 0; 503 flowc->mnemval[i].r4[2] = 0; 504 } 505 506 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 507 c4iw_ofld_send(&ep->com.dev->rdev, skb); 508 } 509 510 static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp) 511 { 512 struct cpl_close_con_req *req; 513 struct sk_buff *skb; 514 int wrlen = roundup(sizeof *req, 16); 515 516 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 517 skb = get_skb(NULL, wrlen, gfp); 518 if (!skb) { 519 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); 520 return -ENOMEM; 521 } 522 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 523 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 524 req = (struct cpl_close_con_req *) skb_put(skb, wrlen); 525 memset(req, 0, wrlen); 526 INIT_TP_WR(req, ep->hwtid); 527 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, 528 ep->hwtid)); 529 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 530 } 531 532 static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) 533 { 534 struct cpl_abort_req *req; 535 int wrlen = roundup(sizeof *req, 16); 536 537 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 538 skb = get_skb(skb, wrlen, gfp); 539 if (!skb) { 540 printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 541 __func__); 542 return -ENOMEM; 543 } 544 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 545 t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure); 546 req = (struct cpl_abort_req *) skb_put(skb, wrlen); 547 memset(req, 0, wrlen); 548 INIT_TP_WR(req, ep->hwtid); 549 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); 550 req->cmd = CPL_ABORT_SEND_RST; 551 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 552 } 553 554 /* 555 * c4iw_form_pm_msg - Form a port mapper message with mapping info 556 */ 557 static void c4iw_form_pm_msg(struct c4iw_ep *ep, 558 struct iwpm_sa_data *pm_msg) 559 { 560 memcpy(&pm_msg->loc_addr, &ep->com.local_addr, 561 sizeof(ep->com.local_addr)); 562 memcpy(&pm_msg->rem_addr, &ep->com.remote_addr, 563 sizeof(ep->com.remote_addr)); 564 } 565 566 /* 567 * c4iw_form_reg_msg - Form a port mapper message with dev info 568 */ 569 static void c4iw_form_reg_msg(struct c4iw_dev *dev, 570 struct iwpm_dev_data *pm_msg) 571 { 572 memcpy(pm_msg->dev_name, dev->ibdev.name, IWPM_DEVNAME_SIZE); 573 memcpy(pm_msg->if_name, dev->rdev.lldi.ports[0]->name, 574 IWPM_IFNAME_SIZE); 575 } 576 577 static void c4iw_record_pm_msg(struct c4iw_ep *ep, 578 struct iwpm_sa_data *pm_msg) 579 { 580 memcpy(&ep->com.mapped_local_addr, &pm_msg->mapped_loc_addr, 581 sizeof(ep->com.mapped_local_addr)); 582 memcpy(&ep->com.mapped_remote_addr, &pm_msg->mapped_rem_addr, 583 sizeof(ep->com.mapped_remote_addr)); 584 } 585 586 static void best_mtu(const unsigned short *mtus, unsigned short mtu, 587 unsigned int *idx, int use_ts, int ipv6) 588 { 589 unsigned short hdr_size = (ipv6 ? 590 sizeof(struct ipv6hdr) : 591 sizeof(struct iphdr)) + 592 sizeof(struct tcphdr) + 593 (use_ts ? 594 round_up(TCPOLEN_TIMESTAMP, 4) : 0); 595 unsigned short data_size = mtu - hdr_size; 596 597 cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx); 598 } 599 600 static int send_connect(struct c4iw_ep *ep) 601 { 602 struct cpl_act_open_req *req; 603 struct cpl_t5_act_open_req *t5_req; 604 struct cpl_act_open_req6 *req6; 605 struct cpl_t5_act_open_req6 *t5_req6; 606 struct sk_buff *skb; 607 u64 opt0; 608 u32 opt2; 609 unsigned int mtu_idx; 610 int wscale; 611 int wrlen; 612 int sizev4 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ? 613 sizeof(struct cpl_act_open_req) : 614 sizeof(struct cpl_t5_act_open_req); 615 int sizev6 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ? 616 sizeof(struct cpl_act_open_req6) : 617 sizeof(struct cpl_t5_act_open_req6); 618 struct sockaddr_in *la = (struct sockaddr_in *) 619 &ep->com.mapped_local_addr; 620 struct sockaddr_in *ra = (struct sockaddr_in *) 621 &ep->com.mapped_remote_addr; 622 struct sockaddr_in6 *la6 = (struct sockaddr_in6 *) 623 &ep->com.mapped_local_addr; 624 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *) 625 &ep->com.mapped_remote_addr; 626 int win; 627 628 wrlen = (ep->com.remote_addr.ss_family == AF_INET) ? 629 roundup(sizev4, 16) : 630 roundup(sizev6, 16); 631 632 PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid); 633 634 skb = get_skb(NULL, wrlen, GFP_KERNEL); 635 if (!skb) { 636 printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 637 __func__); 638 return -ENOMEM; 639 } 640 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 641 642 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, 643 enable_tcp_timestamps, 644 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1); 645 wscale = compute_wscale(rcv_win); 646 647 /* 648 * Specify the largest window that will fit in opt0. The 649 * remainder will be specified in the rx_data_ack. 650 */ 651 win = ep->rcv_win >> 10; 652 if (win > RCV_BUFSIZ_M) 653 win = RCV_BUFSIZ_M; 654 655 opt0 = (nocong ? NO_CONG(1) : 0) | 656 KEEP_ALIVE_F | 657 DELACK(1) | 658 WND_SCALE_V(wscale) | 659 MSS_IDX_V(mtu_idx) | 660 L2T_IDX_V(ep->l2t->idx) | 661 TX_CHAN_V(ep->tx_chan) | 662 SMAC_SEL_V(ep->smac_idx) | 663 DSCP(ep->tos) | 664 ULP_MODE_V(ULP_MODE_TCPDDP) | 665 RCV_BUFSIZ_V(win); 666 opt2 = RX_CHANNEL_V(0) | 667 CCTRL_ECN(enable_ecn) | 668 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid); 669 if (enable_tcp_timestamps) 670 opt2 |= TSTAMPS_EN(1); 671 if (enable_tcp_sack) 672 opt2 |= SACK_EN(1); 673 if (wscale && enable_tcp_window_scaling) 674 opt2 |= WND_SCALE_EN_F; 675 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { 676 opt2 |= T5_OPT_2_VALID_F; 677 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); 678 opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */ 679 } 680 t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure); 681 682 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { 683 if (ep->com.remote_addr.ss_family == AF_INET) { 684 req = (struct cpl_act_open_req *) skb_put(skb, wrlen); 685 INIT_TP_WR(req, 0); 686 OPCODE_TID(req) = cpu_to_be32( 687 MK_OPCODE_TID(CPL_ACT_OPEN_REQ, 688 ((ep->rss_qid << 14) | ep->atid))); 689 req->local_port = la->sin_port; 690 req->peer_port = ra->sin_port; 691 req->local_ip = la->sin_addr.s_addr; 692 req->peer_ip = ra->sin_addr.s_addr; 693 req->opt0 = cpu_to_be64(opt0); 694 req->params = cpu_to_be32(cxgb4_select_ntuple( 695 ep->com.dev->rdev.lldi.ports[0], 696 ep->l2t)); 697 req->opt2 = cpu_to_be32(opt2); 698 } else { 699 req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen); 700 701 INIT_TP_WR(req6, 0); 702 OPCODE_TID(req6) = cpu_to_be32( 703 MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, 704 ((ep->rss_qid<<14)|ep->atid))); 705 req6->local_port = la6->sin6_port; 706 req6->peer_port = ra6->sin6_port; 707 req6->local_ip_hi = *((__be64 *) 708 (la6->sin6_addr.s6_addr)); 709 req6->local_ip_lo = *((__be64 *) 710 (la6->sin6_addr.s6_addr + 8)); 711 req6->peer_ip_hi = *((__be64 *) 712 (ra6->sin6_addr.s6_addr)); 713 req6->peer_ip_lo = *((__be64 *) 714 (ra6->sin6_addr.s6_addr + 8)); 715 req6->opt0 = cpu_to_be64(opt0); 716 req6->params = cpu_to_be32(cxgb4_select_ntuple( 717 ep->com.dev->rdev.lldi.ports[0], 718 ep->l2t)); 719 req6->opt2 = cpu_to_be32(opt2); 720 } 721 } else { 722 u32 isn = (prandom_u32() & ~7UL) - 1; 723 724 if (peer2peer) 725 isn += 4; 726 727 if (ep->com.remote_addr.ss_family == AF_INET) { 728 t5_req = (struct cpl_t5_act_open_req *) 729 skb_put(skb, wrlen); 730 INIT_TP_WR(t5_req, 0); 731 OPCODE_TID(t5_req) = cpu_to_be32( 732 MK_OPCODE_TID(CPL_ACT_OPEN_REQ, 733 ((ep->rss_qid << 14) | ep->atid))); 734 t5_req->local_port = la->sin_port; 735 t5_req->peer_port = ra->sin_port; 736 t5_req->local_ip = la->sin_addr.s_addr; 737 t5_req->peer_ip = ra->sin_addr.s_addr; 738 t5_req->opt0 = cpu_to_be64(opt0); 739 t5_req->params = cpu_to_be64(FILTER_TUPLE_V( 740 cxgb4_select_ntuple( 741 ep->com.dev->rdev.lldi.ports[0], 742 ep->l2t))); 743 t5_req->rsvd = cpu_to_be32(isn); 744 PDBG("%s snd_isn %u\n", __func__, 745 be32_to_cpu(t5_req->rsvd)); 746 t5_req->opt2 = cpu_to_be32(opt2); 747 } else { 748 t5_req6 = (struct cpl_t5_act_open_req6 *) 749 skb_put(skb, wrlen); 750 INIT_TP_WR(t5_req6, 0); 751 OPCODE_TID(t5_req6) = cpu_to_be32( 752 MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, 753 ((ep->rss_qid<<14)|ep->atid))); 754 t5_req6->local_port = la6->sin6_port; 755 t5_req6->peer_port = ra6->sin6_port; 756 t5_req6->local_ip_hi = *((__be64 *) 757 (la6->sin6_addr.s6_addr)); 758 t5_req6->local_ip_lo = *((__be64 *) 759 (la6->sin6_addr.s6_addr + 8)); 760 t5_req6->peer_ip_hi = *((__be64 *) 761 (ra6->sin6_addr.s6_addr)); 762 t5_req6->peer_ip_lo = *((__be64 *) 763 (ra6->sin6_addr.s6_addr + 8)); 764 t5_req6->opt0 = cpu_to_be64(opt0); 765 t5_req6->params = cpu_to_be64(FILTER_TUPLE_V( 766 cxgb4_select_ntuple( 767 ep->com.dev->rdev.lldi.ports[0], 768 ep->l2t))); 769 t5_req6->rsvd = cpu_to_be32(isn); 770 PDBG("%s snd_isn %u\n", __func__, 771 be32_to_cpu(t5_req6->rsvd)); 772 t5_req6->opt2 = cpu_to_be32(opt2); 773 } 774 } 775 776 set_bit(ACT_OPEN_REQ, &ep->com.history); 777 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 778 } 779 780 static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, 781 u8 mpa_rev_to_use) 782 { 783 int mpalen, wrlen; 784 struct fw_ofld_tx_data_wr *req; 785 struct mpa_message *mpa; 786 struct mpa_v2_conn_params mpa_v2_params; 787 788 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 789 790 BUG_ON(skb_cloned(skb)); 791 792 mpalen = sizeof(*mpa) + ep->plen; 793 if (mpa_rev_to_use == 2) 794 mpalen += sizeof(struct mpa_v2_conn_params); 795 wrlen = roundup(mpalen + sizeof *req, 16); 796 skb = get_skb(skb, wrlen, GFP_KERNEL); 797 if (!skb) { 798 connect_reply_upcall(ep, -ENOMEM); 799 return; 800 } 801 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 802 803 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); 804 memset(req, 0, wrlen); 805 req->op_to_immdlen = cpu_to_be32( 806 FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | 807 FW_WR_COMPL_F | 808 FW_WR_IMMDLEN_V(mpalen)); 809 req->flowid_len16 = cpu_to_be32( 810 FW_WR_FLOWID_V(ep->hwtid) | 811 FW_WR_LEN16_V(wrlen >> 4)); 812 req->plen = cpu_to_be32(mpalen); 813 req->tunnel_to_proxy = cpu_to_be32( 814 FW_OFLD_TX_DATA_WR_FLUSH_F | 815 FW_OFLD_TX_DATA_WR_SHOVE_F); 816 817 mpa = (struct mpa_message *)(req + 1); 818 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); 819 mpa->flags = (crc_enabled ? MPA_CRC : 0) | 820 (markers_enabled ? MPA_MARKERS : 0) | 821 (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0); 822 mpa->private_data_size = htons(ep->plen); 823 mpa->revision = mpa_rev_to_use; 824 if (mpa_rev_to_use == 1) { 825 ep->tried_with_mpa_v1 = 1; 826 ep->retry_with_mpa_v1 = 0; 827 } 828 829 if (mpa_rev_to_use == 2) { 830 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 831 sizeof (struct mpa_v2_conn_params)); 832 PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird, 833 ep->ord); 834 mpa_v2_params.ird = htons((u16)ep->ird); 835 mpa_v2_params.ord = htons((u16)ep->ord); 836 837 if (peer2peer) { 838 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 839 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) 840 mpa_v2_params.ord |= 841 htons(MPA_V2_RDMA_WRITE_RTR); 842 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) 843 mpa_v2_params.ord |= 844 htons(MPA_V2_RDMA_READ_RTR); 845 } 846 memcpy(mpa->private_data, &mpa_v2_params, 847 sizeof(struct mpa_v2_conn_params)); 848 849 if (ep->plen) 850 memcpy(mpa->private_data + 851 sizeof(struct mpa_v2_conn_params), 852 ep->mpa_pkt + sizeof(*mpa), ep->plen); 853 } else 854 if (ep->plen) 855 memcpy(mpa->private_data, 856 ep->mpa_pkt + sizeof(*mpa), ep->plen); 857 858 /* 859 * Reference the mpa skb. This ensures the data area 860 * will remain in memory until the hw acks the tx. 861 * Function fw4_ack() will deref it. 862 */ 863 skb_get(skb); 864 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 865 BUG_ON(ep->mpa_skb); 866 ep->mpa_skb = skb; 867 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 868 start_ep_timer(ep); 869 __state_set(&ep->com, MPA_REQ_SENT); 870 ep->mpa_attr.initiator = 1; 871 ep->snd_seq += mpalen; 872 return; 873 } 874 875 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) 876 { 877 int mpalen, wrlen; 878 struct fw_ofld_tx_data_wr *req; 879 struct mpa_message *mpa; 880 struct sk_buff *skb; 881 struct mpa_v2_conn_params mpa_v2_params; 882 883 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 884 885 mpalen = sizeof(*mpa) + plen; 886 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) 887 mpalen += sizeof(struct mpa_v2_conn_params); 888 wrlen = roundup(mpalen + sizeof *req, 16); 889 890 skb = get_skb(NULL, wrlen, GFP_KERNEL); 891 if (!skb) { 892 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); 893 return -ENOMEM; 894 } 895 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 896 897 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); 898 memset(req, 0, wrlen); 899 req->op_to_immdlen = cpu_to_be32( 900 FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | 901 FW_WR_COMPL_F | 902 FW_WR_IMMDLEN_V(mpalen)); 903 req->flowid_len16 = cpu_to_be32( 904 FW_WR_FLOWID_V(ep->hwtid) | 905 FW_WR_LEN16_V(wrlen >> 4)); 906 req->plen = cpu_to_be32(mpalen); 907 req->tunnel_to_proxy = cpu_to_be32( 908 FW_OFLD_TX_DATA_WR_FLUSH_F | 909 FW_OFLD_TX_DATA_WR_SHOVE_F); 910 911 mpa = (struct mpa_message *)(req + 1); 912 memset(mpa, 0, sizeof(*mpa)); 913 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 914 mpa->flags = MPA_REJECT; 915 mpa->revision = ep->mpa_attr.version; 916 mpa->private_data_size = htons(plen); 917 918 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 919 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 920 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 921 sizeof (struct mpa_v2_conn_params)); 922 mpa_v2_params.ird = htons(((u16)ep->ird) | 923 (peer2peer ? MPA_V2_PEER2PEER_MODEL : 924 0)); 925 mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ? 926 (p2p_type == 927 FW_RI_INIT_P2PTYPE_RDMA_WRITE ? 928 MPA_V2_RDMA_WRITE_RTR : p2p_type == 929 FW_RI_INIT_P2PTYPE_READ_REQ ? 930 MPA_V2_RDMA_READ_RTR : 0) : 0)); 931 memcpy(mpa->private_data, &mpa_v2_params, 932 sizeof(struct mpa_v2_conn_params)); 933 934 if (ep->plen) 935 memcpy(mpa->private_data + 936 sizeof(struct mpa_v2_conn_params), pdata, plen); 937 } else 938 if (plen) 939 memcpy(mpa->private_data, pdata, plen); 940 941 /* 942 * Reference the mpa skb again. This ensures the data area 943 * will remain in memory until the hw acks the tx. 944 * Function fw4_ack() will deref it. 945 */ 946 skb_get(skb); 947 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 948 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 949 BUG_ON(ep->mpa_skb); 950 ep->mpa_skb = skb; 951 ep->snd_seq += mpalen; 952 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 953 } 954 955 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) 956 { 957 int mpalen, wrlen; 958 struct fw_ofld_tx_data_wr *req; 959 struct mpa_message *mpa; 960 struct sk_buff *skb; 961 struct mpa_v2_conn_params mpa_v2_params; 962 963 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 964 965 mpalen = sizeof(*mpa) + plen; 966 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) 967 mpalen += sizeof(struct mpa_v2_conn_params); 968 wrlen = roundup(mpalen + sizeof *req, 16); 969 970 skb = get_skb(NULL, wrlen, GFP_KERNEL); 971 if (!skb) { 972 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); 973 return -ENOMEM; 974 } 975 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 976 977 req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen); 978 memset(req, 0, wrlen); 979 req->op_to_immdlen = cpu_to_be32( 980 FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | 981 FW_WR_COMPL_F | 982 FW_WR_IMMDLEN_V(mpalen)); 983 req->flowid_len16 = cpu_to_be32( 984 FW_WR_FLOWID_V(ep->hwtid) | 985 FW_WR_LEN16_V(wrlen >> 4)); 986 req->plen = cpu_to_be32(mpalen); 987 req->tunnel_to_proxy = cpu_to_be32( 988 FW_OFLD_TX_DATA_WR_FLUSH_F | 989 FW_OFLD_TX_DATA_WR_SHOVE_F); 990 991 mpa = (struct mpa_message *)(req + 1); 992 memset(mpa, 0, sizeof(*mpa)); 993 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 994 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | 995 (markers_enabled ? MPA_MARKERS : 0); 996 mpa->revision = ep->mpa_attr.version; 997 mpa->private_data_size = htons(plen); 998 999 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1000 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 1001 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 1002 sizeof (struct mpa_v2_conn_params)); 1003 mpa_v2_params.ird = htons((u16)ep->ird); 1004 mpa_v2_params.ord = htons((u16)ep->ord); 1005 if (peer2peer && (ep->mpa_attr.p2p_type != 1006 FW_RI_INIT_P2PTYPE_DISABLED)) { 1007 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 1008 1009 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) 1010 mpa_v2_params.ord |= 1011 htons(MPA_V2_RDMA_WRITE_RTR); 1012 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) 1013 mpa_v2_params.ord |= 1014 htons(MPA_V2_RDMA_READ_RTR); 1015 } 1016 1017 memcpy(mpa->private_data, &mpa_v2_params, 1018 sizeof(struct mpa_v2_conn_params)); 1019 1020 if (ep->plen) 1021 memcpy(mpa->private_data + 1022 sizeof(struct mpa_v2_conn_params), pdata, plen); 1023 } else 1024 if (plen) 1025 memcpy(mpa->private_data, pdata, plen); 1026 1027 /* 1028 * Reference the mpa skb. This ensures the data area 1029 * will remain in memory until the hw acks the tx. 1030 * Function fw4_ack() will deref it. 1031 */ 1032 skb_get(skb); 1033 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 1034 ep->mpa_skb = skb; 1035 __state_set(&ep->com, MPA_REP_SENT); 1036 ep->snd_seq += mpalen; 1037 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1038 } 1039 1040 static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) 1041 { 1042 struct c4iw_ep *ep; 1043 struct cpl_act_establish *req = cplhdr(skb); 1044 unsigned int tid = GET_TID(req); 1045 unsigned int atid = GET_TID_TID(ntohl(req->tos_atid)); 1046 struct tid_info *t = dev->rdev.lldi.tids; 1047 1048 ep = lookup_atid(t, atid); 1049 1050 PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid, 1051 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); 1052 1053 mutex_lock(&ep->com.mutex); 1054 dst_confirm(ep->dst); 1055 1056 /* setup the hwtid for this connection */ 1057 ep->hwtid = tid; 1058 cxgb4_insert_tid(t, ep, tid); 1059 insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid); 1060 1061 ep->snd_seq = be32_to_cpu(req->snd_isn); 1062 ep->rcv_seq = be32_to_cpu(req->rcv_isn); 1063 1064 set_emss(ep, ntohs(req->tcp_opt)); 1065 1066 /* dealloc the atid */ 1067 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); 1068 cxgb4_free_atid(t, atid); 1069 set_bit(ACT_ESTAB, &ep->com.history); 1070 1071 /* start MPA negotiation */ 1072 send_flowc(ep, NULL); 1073 if (ep->retry_with_mpa_v1) 1074 send_mpa_req(ep, skb, 1); 1075 else 1076 send_mpa_req(ep, skb, mpa_rev); 1077 mutex_unlock(&ep->com.mutex); 1078 return 0; 1079 } 1080 1081 static void close_complete_upcall(struct c4iw_ep *ep, int status) 1082 { 1083 struct iw_cm_event event; 1084 1085 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1086 memset(&event, 0, sizeof(event)); 1087 event.event = IW_CM_EVENT_CLOSE; 1088 event.status = status; 1089 if (ep->com.cm_id) { 1090 PDBG("close complete delivered ep %p cm_id %p tid %u\n", 1091 ep, ep->com.cm_id, ep->hwtid); 1092 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1093 ep->com.cm_id->rem_ref(ep->com.cm_id); 1094 ep->com.cm_id = NULL; 1095 set_bit(CLOSE_UPCALL, &ep->com.history); 1096 } 1097 } 1098 1099 static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) 1100 { 1101 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1102 __state_set(&ep->com, ABORTING); 1103 set_bit(ABORT_CONN, &ep->com.history); 1104 return send_abort(ep, skb, gfp); 1105 } 1106 1107 static void peer_close_upcall(struct c4iw_ep *ep) 1108 { 1109 struct iw_cm_event event; 1110 1111 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1112 memset(&event, 0, sizeof(event)); 1113 event.event = IW_CM_EVENT_DISCONNECT; 1114 if (ep->com.cm_id) { 1115 PDBG("peer close delivered ep %p cm_id %p tid %u\n", 1116 ep, ep->com.cm_id, ep->hwtid); 1117 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1118 set_bit(DISCONN_UPCALL, &ep->com.history); 1119 } 1120 } 1121 1122 static void peer_abort_upcall(struct c4iw_ep *ep) 1123 { 1124 struct iw_cm_event event; 1125 1126 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1127 memset(&event, 0, sizeof(event)); 1128 event.event = IW_CM_EVENT_CLOSE; 1129 event.status = -ECONNRESET; 1130 if (ep->com.cm_id) { 1131 PDBG("abort delivered ep %p cm_id %p tid %u\n", ep, 1132 ep->com.cm_id, ep->hwtid); 1133 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1134 ep->com.cm_id->rem_ref(ep->com.cm_id); 1135 ep->com.cm_id = NULL; 1136 set_bit(ABORT_UPCALL, &ep->com.history); 1137 } 1138 } 1139 1140 static void connect_reply_upcall(struct c4iw_ep *ep, int status) 1141 { 1142 struct iw_cm_event event; 1143 1144 PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status); 1145 memset(&event, 0, sizeof(event)); 1146 event.event = IW_CM_EVENT_CONNECT_REPLY; 1147 event.status = status; 1148 memcpy(&event.local_addr, &ep->com.local_addr, 1149 sizeof(ep->com.local_addr)); 1150 memcpy(&event.remote_addr, &ep->com.remote_addr, 1151 sizeof(ep->com.remote_addr)); 1152 1153 if ((status == 0) || (status == -ECONNREFUSED)) { 1154 if (!ep->tried_with_mpa_v1) { 1155 /* this means MPA_v2 is used */ 1156 event.private_data_len = ep->plen - 1157 sizeof(struct mpa_v2_conn_params); 1158 event.private_data = ep->mpa_pkt + 1159 sizeof(struct mpa_message) + 1160 sizeof(struct mpa_v2_conn_params); 1161 } else { 1162 /* this means MPA_v1 is used */ 1163 event.private_data_len = ep->plen; 1164 event.private_data = ep->mpa_pkt + 1165 sizeof(struct mpa_message); 1166 } 1167 } 1168 1169 PDBG("%s ep %p tid %u status %d\n", __func__, ep, 1170 ep->hwtid, status); 1171 set_bit(CONN_RPL_UPCALL, &ep->com.history); 1172 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1173 1174 if (status < 0) { 1175 ep->com.cm_id->rem_ref(ep->com.cm_id); 1176 ep->com.cm_id = NULL; 1177 } 1178 } 1179 1180 static int connect_request_upcall(struct c4iw_ep *ep) 1181 { 1182 struct iw_cm_event event; 1183 int ret; 1184 1185 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1186 memset(&event, 0, sizeof(event)); 1187 event.event = IW_CM_EVENT_CONNECT_REQUEST; 1188 memcpy(&event.local_addr, &ep->com.local_addr, 1189 sizeof(ep->com.local_addr)); 1190 memcpy(&event.remote_addr, &ep->com.remote_addr, 1191 sizeof(ep->com.remote_addr)); 1192 event.provider_data = ep; 1193 if (!ep->tried_with_mpa_v1) { 1194 /* this means MPA_v2 is used */ 1195 event.ord = ep->ord; 1196 event.ird = ep->ird; 1197 event.private_data_len = ep->plen - 1198 sizeof(struct mpa_v2_conn_params); 1199 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + 1200 sizeof(struct mpa_v2_conn_params); 1201 } else { 1202 /* this means MPA_v1 is used. Send max supported */ 1203 event.ord = cur_max_read_depth(ep->com.dev); 1204 event.ird = cur_max_read_depth(ep->com.dev); 1205 event.private_data_len = ep->plen; 1206 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); 1207 } 1208 c4iw_get_ep(&ep->com); 1209 ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id, 1210 &event); 1211 if (ret) 1212 c4iw_put_ep(&ep->com); 1213 set_bit(CONNREQ_UPCALL, &ep->com.history); 1214 c4iw_put_ep(&ep->parent_ep->com); 1215 return ret; 1216 } 1217 1218 static void established_upcall(struct c4iw_ep *ep) 1219 { 1220 struct iw_cm_event event; 1221 1222 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1223 memset(&event, 0, sizeof(event)); 1224 event.event = IW_CM_EVENT_ESTABLISHED; 1225 event.ird = ep->ird; 1226 event.ord = ep->ord; 1227 if (ep->com.cm_id) { 1228 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1229 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1230 set_bit(ESTAB_UPCALL, &ep->com.history); 1231 } 1232 } 1233 1234 static int update_rx_credits(struct c4iw_ep *ep, u32 credits) 1235 { 1236 struct cpl_rx_data_ack *req; 1237 struct sk_buff *skb; 1238 int wrlen = roundup(sizeof *req, 16); 1239 1240 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); 1241 skb = get_skb(NULL, wrlen, GFP_KERNEL); 1242 if (!skb) { 1243 printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n"); 1244 return 0; 1245 } 1246 1247 /* 1248 * If we couldn't specify the entire rcv window at connection setup 1249 * due to the limit in the number of bits in the RCV_BUFSIZ field, 1250 * then add the overage in to the credits returned. 1251 */ 1252 if (ep->rcv_win > RCV_BUFSIZ_M * 1024) 1253 credits += ep->rcv_win - RCV_BUFSIZ_M * 1024; 1254 1255 req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen); 1256 memset(req, 0, wrlen); 1257 INIT_TP_WR(req, ep->hwtid); 1258 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, 1259 ep->hwtid)); 1260 req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F | 1261 F_RX_DACK_CHANGE | 1262 V_RX_DACK_MODE(dack_mode)); 1263 set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx); 1264 c4iw_ofld_send(&ep->com.dev->rdev, skb); 1265 return credits; 1266 } 1267 1268 #define RELAXED_IRD_NEGOTIATION 1 1269 1270 static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) 1271 { 1272 struct mpa_message *mpa; 1273 struct mpa_v2_conn_params *mpa_v2_params; 1274 u16 plen; 1275 u16 resp_ird, resp_ord; 1276 u8 rtr_mismatch = 0, insuff_ird = 0; 1277 struct c4iw_qp_attributes attrs; 1278 enum c4iw_qp_attr_mask mask; 1279 int err; 1280 int disconnect = 0; 1281 1282 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1283 1284 /* 1285 * Stop mpa timer. If it expired, then 1286 * we ignore the MPA reply. process_timeout() 1287 * will abort the connection. 1288 */ 1289 if (stop_ep_timer(ep)) 1290 return 0; 1291 1292 /* 1293 * If we get more than the supported amount of private data 1294 * then we must fail this connection. 1295 */ 1296 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { 1297 err = -EINVAL; 1298 goto err; 1299 } 1300 1301 /* 1302 * copy the new data into our accumulation buffer. 1303 */ 1304 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 1305 skb->len); 1306 ep->mpa_pkt_len += skb->len; 1307 1308 /* 1309 * if we don't even have the mpa message, then bail. 1310 */ 1311 if (ep->mpa_pkt_len < sizeof(*mpa)) 1312 return 0; 1313 mpa = (struct mpa_message *) ep->mpa_pkt; 1314 1315 /* Validate MPA header. */ 1316 if (mpa->revision > mpa_rev) { 1317 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," 1318 " Received = %d\n", __func__, mpa_rev, mpa->revision); 1319 err = -EPROTO; 1320 goto err; 1321 } 1322 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { 1323 err = -EPROTO; 1324 goto err; 1325 } 1326 1327 plen = ntohs(mpa->private_data_size); 1328 1329 /* 1330 * Fail if there's too much private data. 1331 */ 1332 if (plen > MPA_MAX_PRIVATE_DATA) { 1333 err = -EPROTO; 1334 goto err; 1335 } 1336 1337 /* 1338 * If plen does not account for pkt size 1339 */ 1340 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 1341 err = -EPROTO; 1342 goto err; 1343 } 1344 1345 ep->plen = (u8) plen; 1346 1347 /* 1348 * If we don't have all the pdata yet, then bail. 1349 * We'll continue process when more data arrives. 1350 */ 1351 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 1352 return 0; 1353 1354 if (mpa->flags & MPA_REJECT) { 1355 err = -ECONNREFUSED; 1356 goto err; 1357 } 1358 1359 /* 1360 * If we get here we have accumulated the entire mpa 1361 * start reply message including private data. And 1362 * the MPA header is valid. 1363 */ 1364 __state_set(&ep->com, FPDU_MODE); 1365 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1366 ep->mpa_attr.recv_marker_enabled = markers_enabled; 1367 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1368 ep->mpa_attr.version = mpa->revision; 1369 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1370 1371 if (mpa->revision == 2) { 1372 ep->mpa_attr.enhanced_rdma_conn = 1373 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; 1374 if (ep->mpa_attr.enhanced_rdma_conn) { 1375 mpa_v2_params = (struct mpa_v2_conn_params *) 1376 (ep->mpa_pkt + sizeof(*mpa)); 1377 resp_ird = ntohs(mpa_v2_params->ird) & 1378 MPA_V2_IRD_ORD_MASK; 1379 resp_ord = ntohs(mpa_v2_params->ord) & 1380 MPA_V2_IRD_ORD_MASK; 1381 PDBG("%s responder ird %u ord %u ep ird %u ord %u\n", 1382 __func__, resp_ird, resp_ord, ep->ird, ep->ord); 1383 1384 /* 1385 * This is a double-check. Ideally, below checks are 1386 * not required since ird/ord stuff has been taken 1387 * care of in c4iw_accept_cr 1388 */ 1389 if (ep->ird < resp_ord) { 1390 if (RELAXED_IRD_NEGOTIATION && resp_ord <= 1391 ep->com.dev->rdev.lldi.max_ordird_qp) 1392 ep->ird = resp_ord; 1393 else 1394 insuff_ird = 1; 1395 } else if (ep->ird > resp_ord) { 1396 ep->ird = resp_ord; 1397 } 1398 if (ep->ord > resp_ird) { 1399 if (RELAXED_IRD_NEGOTIATION) 1400 ep->ord = resp_ird; 1401 else 1402 insuff_ird = 1; 1403 } 1404 if (insuff_ird) { 1405 err = -ENOMEM; 1406 ep->ird = resp_ord; 1407 ep->ord = resp_ird; 1408 } 1409 1410 if (ntohs(mpa_v2_params->ird) & 1411 MPA_V2_PEER2PEER_MODEL) { 1412 if (ntohs(mpa_v2_params->ord) & 1413 MPA_V2_RDMA_WRITE_RTR) 1414 ep->mpa_attr.p2p_type = 1415 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 1416 else if (ntohs(mpa_v2_params->ord) & 1417 MPA_V2_RDMA_READ_RTR) 1418 ep->mpa_attr.p2p_type = 1419 FW_RI_INIT_P2PTYPE_READ_REQ; 1420 } 1421 } 1422 } else if (mpa->revision == 1) 1423 if (peer2peer) 1424 ep->mpa_attr.p2p_type = p2p_type; 1425 1426 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " 1427 "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = " 1428 "%d\n", __func__, ep->mpa_attr.crc_enabled, 1429 ep->mpa_attr.recv_marker_enabled, 1430 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 1431 ep->mpa_attr.p2p_type, p2p_type); 1432 1433 /* 1434 * If responder's RTR does not match with that of initiator, assign 1435 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not 1436 * generated when moving QP to RTS state. 1437 * A TERM message will be sent after QP has moved to RTS state 1438 */ 1439 if ((ep->mpa_attr.version == 2) && peer2peer && 1440 (ep->mpa_attr.p2p_type != p2p_type)) { 1441 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1442 rtr_mismatch = 1; 1443 } 1444 1445 attrs.mpa_attr = ep->mpa_attr; 1446 attrs.max_ird = ep->ird; 1447 attrs.max_ord = ep->ord; 1448 attrs.llp_stream_handle = ep; 1449 attrs.next_state = C4IW_QP_STATE_RTS; 1450 1451 mask = C4IW_QP_ATTR_NEXT_STATE | 1452 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | 1453 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; 1454 1455 /* bind QP and TID with INIT_WR */ 1456 err = c4iw_modify_qp(ep->com.qp->rhp, 1457 ep->com.qp, mask, &attrs, 1); 1458 if (err) 1459 goto err; 1460 1461 /* 1462 * If responder's RTR requirement did not match with what initiator 1463 * supports, generate TERM message 1464 */ 1465 if (rtr_mismatch) { 1466 printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__); 1467 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1468 attrs.ecode = MPA_NOMATCH_RTR; 1469 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1470 attrs.send_term = 1; 1471 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1472 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1473 err = -ENOMEM; 1474 disconnect = 1; 1475 goto out; 1476 } 1477 1478 /* 1479 * Generate TERM if initiator IRD is not sufficient for responder 1480 * provided ORD. Currently, we do the same behaviour even when 1481 * responder provided IRD is also not sufficient as regards to 1482 * initiator ORD. 1483 */ 1484 if (insuff_ird) { 1485 printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n", 1486 __func__); 1487 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1488 attrs.ecode = MPA_INSUFF_IRD; 1489 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1490 attrs.send_term = 1; 1491 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1492 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1493 err = -ENOMEM; 1494 disconnect = 1; 1495 goto out; 1496 } 1497 goto out; 1498 err: 1499 __state_set(&ep->com, ABORTING); 1500 send_abort(ep, skb, GFP_KERNEL); 1501 out: 1502 connect_reply_upcall(ep, err); 1503 return disconnect; 1504 } 1505 1506 static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) 1507 { 1508 struct mpa_message *mpa; 1509 struct mpa_v2_conn_params *mpa_v2_params; 1510 u16 plen; 1511 1512 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1513 1514 /* 1515 * If we get more than the supported amount of private data 1516 * then we must fail this connection. 1517 */ 1518 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { 1519 (void)stop_ep_timer(ep); 1520 abort_connection(ep, skb, GFP_KERNEL); 1521 return; 1522 } 1523 1524 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); 1525 1526 /* 1527 * Copy the new data into our accumulation buffer. 1528 */ 1529 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 1530 skb->len); 1531 ep->mpa_pkt_len += skb->len; 1532 1533 /* 1534 * If we don't even have the mpa message, then bail. 1535 * We'll continue process when more data arrives. 1536 */ 1537 if (ep->mpa_pkt_len < sizeof(*mpa)) 1538 return; 1539 1540 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); 1541 mpa = (struct mpa_message *) ep->mpa_pkt; 1542 1543 /* 1544 * Validate MPA Header. 1545 */ 1546 if (mpa->revision > mpa_rev) { 1547 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," 1548 " Received = %d\n", __func__, mpa_rev, mpa->revision); 1549 (void)stop_ep_timer(ep); 1550 abort_connection(ep, skb, GFP_KERNEL); 1551 return; 1552 } 1553 1554 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) { 1555 (void)stop_ep_timer(ep); 1556 abort_connection(ep, skb, GFP_KERNEL); 1557 return; 1558 } 1559 1560 plen = ntohs(mpa->private_data_size); 1561 1562 /* 1563 * Fail if there's too much private data. 1564 */ 1565 if (plen > MPA_MAX_PRIVATE_DATA) { 1566 (void)stop_ep_timer(ep); 1567 abort_connection(ep, skb, GFP_KERNEL); 1568 return; 1569 } 1570 1571 /* 1572 * If plen does not account for pkt size 1573 */ 1574 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 1575 (void)stop_ep_timer(ep); 1576 abort_connection(ep, skb, GFP_KERNEL); 1577 return; 1578 } 1579 ep->plen = (u8) plen; 1580 1581 /* 1582 * If we don't have all the pdata yet, then bail. 1583 */ 1584 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 1585 return; 1586 1587 /* 1588 * If we get here we have accumulated the entire mpa 1589 * start reply message including private data. 1590 */ 1591 ep->mpa_attr.initiator = 0; 1592 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1593 ep->mpa_attr.recv_marker_enabled = markers_enabled; 1594 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1595 ep->mpa_attr.version = mpa->revision; 1596 if (mpa->revision == 1) 1597 ep->tried_with_mpa_v1 = 1; 1598 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1599 1600 if (mpa->revision == 2) { 1601 ep->mpa_attr.enhanced_rdma_conn = 1602 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; 1603 if (ep->mpa_attr.enhanced_rdma_conn) { 1604 mpa_v2_params = (struct mpa_v2_conn_params *) 1605 (ep->mpa_pkt + sizeof(*mpa)); 1606 ep->ird = ntohs(mpa_v2_params->ird) & 1607 MPA_V2_IRD_ORD_MASK; 1608 ep->ord = ntohs(mpa_v2_params->ord) & 1609 MPA_V2_IRD_ORD_MASK; 1610 PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird, 1611 ep->ord); 1612 if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) 1613 if (peer2peer) { 1614 if (ntohs(mpa_v2_params->ord) & 1615 MPA_V2_RDMA_WRITE_RTR) 1616 ep->mpa_attr.p2p_type = 1617 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 1618 else if (ntohs(mpa_v2_params->ord) & 1619 MPA_V2_RDMA_READ_RTR) 1620 ep->mpa_attr.p2p_type = 1621 FW_RI_INIT_P2PTYPE_READ_REQ; 1622 } 1623 } 1624 } else if (mpa->revision == 1) 1625 if (peer2peer) 1626 ep->mpa_attr.p2p_type = p2p_type; 1627 1628 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " 1629 "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__, 1630 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 1631 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 1632 ep->mpa_attr.p2p_type); 1633 1634 /* 1635 * If the endpoint timer already expired, then we ignore 1636 * the start request. process_timeout() will abort 1637 * the connection. 1638 */ 1639 if (!stop_ep_timer(ep)) { 1640 __state_set(&ep->com, MPA_REQ_RCVD); 1641 1642 /* drive upcall */ 1643 mutex_lock_nested(&ep->parent_ep->com.mutex, 1644 SINGLE_DEPTH_NESTING); 1645 if (ep->parent_ep->com.state != DEAD) { 1646 if (connect_request_upcall(ep)) 1647 abort_connection(ep, skb, GFP_KERNEL); 1648 } else { 1649 abort_connection(ep, skb, GFP_KERNEL); 1650 } 1651 mutex_unlock(&ep->parent_ep->com.mutex); 1652 } 1653 return; 1654 } 1655 1656 static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) 1657 { 1658 struct c4iw_ep *ep; 1659 struct cpl_rx_data *hdr = cplhdr(skb); 1660 unsigned int dlen = ntohs(hdr->len); 1661 unsigned int tid = GET_TID(hdr); 1662 struct tid_info *t = dev->rdev.lldi.tids; 1663 __u8 status = hdr->status; 1664 int disconnect = 0; 1665 1666 ep = lookup_tid(t, tid); 1667 if (!ep) 1668 return 0; 1669 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen); 1670 skb_pull(skb, sizeof(*hdr)); 1671 skb_trim(skb, dlen); 1672 mutex_lock(&ep->com.mutex); 1673 1674 /* update RX credits */ 1675 update_rx_credits(ep, dlen); 1676 1677 switch (ep->com.state) { 1678 case MPA_REQ_SENT: 1679 ep->rcv_seq += dlen; 1680 disconnect = process_mpa_reply(ep, skb); 1681 break; 1682 case MPA_REQ_WAIT: 1683 ep->rcv_seq += dlen; 1684 process_mpa_request(ep, skb); 1685 break; 1686 case FPDU_MODE: { 1687 struct c4iw_qp_attributes attrs; 1688 BUG_ON(!ep->com.qp); 1689 if (status) 1690 pr_err("%s Unexpected streaming data." \ 1691 " qpid %u ep %p state %d tid %u status %d\n", 1692 __func__, ep->com.qp->wq.sq.qid, ep, 1693 ep->com.state, ep->hwtid, status); 1694 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1695 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1696 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1697 disconnect = 1; 1698 break; 1699 } 1700 default: 1701 break; 1702 } 1703 mutex_unlock(&ep->com.mutex); 1704 if (disconnect) 1705 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 1706 return 0; 1707 } 1708 1709 static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1710 { 1711 struct c4iw_ep *ep; 1712 struct cpl_abort_rpl_rss *rpl = cplhdr(skb); 1713 int release = 0; 1714 unsigned int tid = GET_TID(rpl); 1715 struct tid_info *t = dev->rdev.lldi.tids; 1716 1717 ep = lookup_tid(t, tid); 1718 if (!ep) { 1719 printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n"); 1720 return 0; 1721 } 1722 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1723 mutex_lock(&ep->com.mutex); 1724 switch (ep->com.state) { 1725 case ABORTING: 1726 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 1727 __state_set(&ep->com, DEAD); 1728 release = 1; 1729 break; 1730 default: 1731 printk(KERN_ERR "%s ep %p state %d\n", 1732 __func__, ep, ep->com.state); 1733 break; 1734 } 1735 mutex_unlock(&ep->com.mutex); 1736 1737 if (release) 1738 release_ep_resources(ep); 1739 return 0; 1740 } 1741 1742 static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) 1743 { 1744 struct sk_buff *skb; 1745 struct fw_ofld_connection_wr *req; 1746 unsigned int mtu_idx; 1747 int wscale; 1748 struct sockaddr_in *sin; 1749 int win; 1750 1751 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 1752 req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req)); 1753 memset(req, 0, sizeof(*req)); 1754 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR)); 1755 req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); 1756 req->le.filter = cpu_to_be32(cxgb4_select_ntuple( 1757 ep->com.dev->rdev.lldi.ports[0], 1758 ep->l2t)); 1759 sin = (struct sockaddr_in *)&ep->com.mapped_local_addr; 1760 req->le.lport = sin->sin_port; 1761 req->le.u.ipv4.lip = sin->sin_addr.s_addr; 1762 sin = (struct sockaddr_in *)&ep->com.mapped_remote_addr; 1763 req->le.pport = sin->sin_port; 1764 req->le.u.ipv4.pip = sin->sin_addr.s_addr; 1765 req->tcb.t_state_to_astid = 1766 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) | 1767 FW_OFLD_CONNECTION_WR_ASTID_V(atid)); 1768 req->tcb.cplrxdataack_cplpassacceptrpl = 1769 htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F); 1770 req->tcb.tx_max = (__force __be32) jiffies; 1771 req->tcb.rcv_adv = htons(1); 1772 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, 1773 enable_tcp_timestamps, 1774 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1); 1775 wscale = compute_wscale(rcv_win); 1776 1777 /* 1778 * Specify the largest window that will fit in opt0. The 1779 * remainder will be specified in the rx_data_ack. 1780 */ 1781 win = ep->rcv_win >> 10; 1782 if (win > RCV_BUFSIZ_M) 1783 win = RCV_BUFSIZ_M; 1784 1785 req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) | 1786 (nocong ? NO_CONG(1) : 0) | 1787 KEEP_ALIVE_F | 1788 DELACK(1) | 1789 WND_SCALE_V(wscale) | 1790 MSS_IDX_V(mtu_idx) | 1791 L2T_IDX_V(ep->l2t->idx) | 1792 TX_CHAN_V(ep->tx_chan) | 1793 SMAC_SEL_V(ep->smac_idx) | 1794 DSCP(ep->tos) | 1795 ULP_MODE_V(ULP_MODE_TCPDDP) | 1796 RCV_BUFSIZ_V(win)); 1797 req->tcb.opt2 = (__force __be32) (PACE(1) | 1798 TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) | 1799 RX_CHANNEL_V(0) | 1800 CCTRL_ECN(enable_ecn) | 1801 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid)); 1802 if (enable_tcp_timestamps) 1803 req->tcb.opt2 |= (__force __be32)TSTAMPS_EN(1); 1804 if (enable_tcp_sack) 1805 req->tcb.opt2 |= (__force __be32)SACK_EN(1); 1806 if (wscale && enable_tcp_window_scaling) 1807 req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F; 1808 req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0); 1809 req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2); 1810 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx); 1811 set_bit(ACT_OFLD_CONN, &ep->com.history); 1812 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1813 } 1814 1815 /* 1816 * Return whether a failed active open has allocated a TID 1817 */ 1818 static inline int act_open_has_tid(int status) 1819 { 1820 return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && 1821 status != CPL_ERR_ARP_MISS; 1822 } 1823 1824 /* Returns whether a CPL status conveys negative advice. 1825 */ 1826 static int is_neg_adv(unsigned int status) 1827 { 1828 return status == CPL_ERR_RTX_NEG_ADVICE || 1829 status == CPL_ERR_PERSIST_NEG_ADVICE || 1830 status == CPL_ERR_KEEPALV_NEG_ADVICE; 1831 } 1832 1833 static char *neg_adv_str(unsigned int status) 1834 { 1835 switch (status) { 1836 case CPL_ERR_RTX_NEG_ADVICE: 1837 return "Retransmit timeout"; 1838 case CPL_ERR_PERSIST_NEG_ADVICE: 1839 return "Persist timeout"; 1840 case CPL_ERR_KEEPALV_NEG_ADVICE: 1841 return "Keepalive timeout"; 1842 default: 1843 return "Unknown"; 1844 } 1845 } 1846 1847 static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi) 1848 { 1849 ep->snd_win = snd_win; 1850 ep->rcv_win = rcv_win; 1851 PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win); 1852 } 1853 1854 #define ACT_OPEN_RETRY_COUNT 2 1855 1856 static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, 1857 struct dst_entry *dst, struct c4iw_dev *cdev, 1858 bool clear_mpa_v1) 1859 { 1860 struct neighbour *n; 1861 int err, step; 1862 struct net_device *pdev; 1863 1864 n = dst_neigh_lookup(dst, peer_ip); 1865 if (!n) 1866 return -ENODEV; 1867 1868 rcu_read_lock(); 1869 err = -ENOMEM; 1870 if (n->dev->flags & IFF_LOOPBACK) { 1871 if (iptype == 4) 1872 pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip); 1873 else if (IS_ENABLED(CONFIG_IPV6)) 1874 for_each_netdev(&init_net, pdev) { 1875 if (ipv6_chk_addr(&init_net, 1876 (struct in6_addr *)peer_ip, 1877 pdev, 1)) 1878 break; 1879 } 1880 else 1881 pdev = NULL; 1882 1883 if (!pdev) { 1884 err = -ENODEV; 1885 goto out; 1886 } 1887 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, 1888 n, pdev, 0); 1889 if (!ep->l2t) 1890 goto out; 1891 ep->mtu = pdev->mtu; 1892 ep->tx_chan = cxgb4_port_chan(pdev); 1893 ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; 1894 step = cdev->rdev.lldi.ntxq / 1895 cdev->rdev.lldi.nchan; 1896 ep->txq_idx = cxgb4_port_idx(pdev) * step; 1897 step = cdev->rdev.lldi.nrxq / 1898 cdev->rdev.lldi.nchan; 1899 ep->ctrlq_idx = cxgb4_port_idx(pdev); 1900 ep->rss_qid = cdev->rdev.lldi.rxq_ids[ 1901 cxgb4_port_idx(pdev) * step]; 1902 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev)); 1903 dev_put(pdev); 1904 } else { 1905 pdev = get_real_dev(n->dev); 1906 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, 1907 n, pdev, 0); 1908 if (!ep->l2t) 1909 goto out; 1910 ep->mtu = dst_mtu(dst); 1911 ep->tx_chan = cxgb4_port_chan(pdev); 1912 ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; 1913 step = cdev->rdev.lldi.ntxq / 1914 cdev->rdev.lldi.nchan; 1915 ep->txq_idx = cxgb4_port_idx(pdev) * step; 1916 ep->ctrlq_idx = cxgb4_port_idx(pdev); 1917 step = cdev->rdev.lldi.nrxq / 1918 cdev->rdev.lldi.nchan; 1919 ep->rss_qid = cdev->rdev.lldi.rxq_ids[ 1920 cxgb4_port_idx(pdev) * step]; 1921 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev)); 1922 1923 if (clear_mpa_v1) { 1924 ep->retry_with_mpa_v1 = 0; 1925 ep->tried_with_mpa_v1 = 0; 1926 } 1927 } 1928 err = 0; 1929 out: 1930 rcu_read_unlock(); 1931 1932 neigh_release(n); 1933 1934 return err; 1935 } 1936 1937 static int c4iw_reconnect(struct c4iw_ep *ep) 1938 { 1939 int err = 0; 1940 struct sockaddr_in *laddr = (struct sockaddr_in *) 1941 &ep->com.cm_id->local_addr; 1942 struct sockaddr_in *raddr = (struct sockaddr_in *) 1943 &ep->com.cm_id->remote_addr; 1944 struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *) 1945 &ep->com.cm_id->local_addr; 1946 struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *) 1947 &ep->com.cm_id->remote_addr; 1948 int iptype; 1949 __u8 *ra; 1950 1951 PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); 1952 init_timer(&ep->timer); 1953 1954 /* 1955 * Allocate an active TID to initiate a TCP connection. 1956 */ 1957 ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); 1958 if (ep->atid == -1) { 1959 pr_err("%s - cannot alloc atid.\n", __func__); 1960 err = -ENOMEM; 1961 goto fail2; 1962 } 1963 insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid); 1964 1965 /* find a route */ 1966 if (ep->com.cm_id->local_addr.ss_family == AF_INET) { 1967 ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr, 1968 raddr->sin_addr.s_addr, laddr->sin_port, 1969 raddr->sin_port, 0); 1970 iptype = 4; 1971 ra = (__u8 *)&raddr->sin_addr; 1972 } else { 1973 ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr, 1974 raddr6->sin6_addr.s6_addr, 1975 laddr6->sin6_port, raddr6->sin6_port, 0, 1976 raddr6->sin6_scope_id); 1977 iptype = 6; 1978 ra = (__u8 *)&raddr6->sin6_addr; 1979 } 1980 if (!ep->dst) { 1981 pr_err("%s - cannot find route.\n", __func__); 1982 err = -EHOSTUNREACH; 1983 goto fail3; 1984 } 1985 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false); 1986 if (err) { 1987 pr_err("%s - cannot alloc l2e.\n", __func__); 1988 goto fail4; 1989 } 1990 1991 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 1992 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 1993 ep->l2t->idx); 1994 1995 state_set(&ep->com, CONNECTING); 1996 ep->tos = 0; 1997 1998 /* send connect request to rnic */ 1999 err = send_connect(ep); 2000 if (!err) 2001 goto out; 2002 2003 cxgb4_l2t_release(ep->l2t); 2004 fail4: 2005 dst_release(ep->dst); 2006 fail3: 2007 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 2008 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 2009 fail2: 2010 /* 2011 * remember to send notification to upper layer. 2012 * We are in here so the upper layer is not aware that this is 2013 * re-connect attempt and so, upper layer is still waiting for 2014 * response of 1st connect request. 2015 */ 2016 connect_reply_upcall(ep, -ECONNRESET); 2017 c4iw_put_ep(&ep->com); 2018 out: 2019 return err; 2020 } 2021 2022 static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2023 { 2024 struct c4iw_ep *ep; 2025 struct cpl_act_open_rpl *rpl = cplhdr(skb); 2026 unsigned int atid = GET_TID_TID(GET_AOPEN_ATID( 2027 ntohl(rpl->atid_status))); 2028 struct tid_info *t = dev->rdev.lldi.tids; 2029 int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status)); 2030 struct sockaddr_in *la; 2031 struct sockaddr_in *ra; 2032 struct sockaddr_in6 *la6; 2033 struct sockaddr_in6 *ra6; 2034 2035 ep = lookup_atid(t, atid); 2036 la = (struct sockaddr_in *)&ep->com.mapped_local_addr; 2037 ra = (struct sockaddr_in *)&ep->com.mapped_remote_addr; 2038 la6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr; 2039 ra6 = (struct sockaddr_in6 *)&ep->com.mapped_remote_addr; 2040 2041 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, 2042 status, status2errno(status)); 2043 2044 if (is_neg_adv(status)) { 2045 dev_warn(&dev->rdev.lldi.pdev->dev, 2046 "Connection problems for atid %u status %u (%s)\n", 2047 atid, status, neg_adv_str(status)); 2048 return 0; 2049 } 2050 2051 set_bit(ACT_OPEN_RPL, &ep->com.history); 2052 2053 /* 2054 * Log interesting failures. 2055 */ 2056 switch (status) { 2057 case CPL_ERR_CONN_RESET: 2058 case CPL_ERR_CONN_TIMEDOUT: 2059 break; 2060 case CPL_ERR_TCAM_FULL: 2061 mutex_lock(&dev->rdev.stats.lock); 2062 dev->rdev.stats.tcam_full++; 2063 mutex_unlock(&dev->rdev.stats.lock); 2064 if (ep->com.local_addr.ss_family == AF_INET && 2065 dev->rdev.lldi.enable_fw_ofld_conn) { 2066 send_fw_act_open_req(ep, 2067 GET_TID_TID(GET_AOPEN_ATID( 2068 ntohl(rpl->atid_status)))); 2069 return 0; 2070 } 2071 break; 2072 case CPL_ERR_CONN_EXIST: 2073 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 2074 set_bit(ACT_RETRY_INUSE, &ep->com.history); 2075 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, 2076 atid); 2077 cxgb4_free_atid(t, atid); 2078 dst_release(ep->dst); 2079 cxgb4_l2t_release(ep->l2t); 2080 c4iw_reconnect(ep); 2081 return 0; 2082 } 2083 break; 2084 default: 2085 if (ep->com.local_addr.ss_family == AF_INET) { 2086 pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n", 2087 atid, status, status2errno(status), 2088 &la->sin_addr.s_addr, ntohs(la->sin_port), 2089 &ra->sin_addr.s_addr, ntohs(ra->sin_port)); 2090 } else { 2091 pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n", 2092 atid, status, status2errno(status), 2093 la6->sin6_addr.s6_addr, ntohs(la6->sin6_port), 2094 ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port)); 2095 } 2096 break; 2097 } 2098 2099 connect_reply_upcall(ep, status2errno(status)); 2100 state_set(&ep->com, DEAD); 2101 2102 if (status && act_open_has_tid(status)) 2103 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); 2104 2105 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); 2106 cxgb4_free_atid(t, atid); 2107 dst_release(ep->dst); 2108 cxgb4_l2t_release(ep->l2t); 2109 c4iw_put_ep(&ep->com); 2110 2111 return 0; 2112 } 2113 2114 static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2115 { 2116 struct cpl_pass_open_rpl *rpl = cplhdr(skb); 2117 struct tid_info *t = dev->rdev.lldi.tids; 2118 unsigned int stid = GET_TID(rpl); 2119 struct c4iw_listen_ep *ep = lookup_stid(t, stid); 2120 2121 if (!ep) { 2122 PDBG("%s stid %d lookup failure!\n", __func__, stid); 2123 goto out; 2124 } 2125 PDBG("%s ep %p status %d error %d\n", __func__, ep, 2126 rpl->status, status2errno(rpl->status)); 2127 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); 2128 2129 out: 2130 return 0; 2131 } 2132 2133 static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2134 { 2135 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); 2136 struct tid_info *t = dev->rdev.lldi.tids; 2137 unsigned int stid = GET_TID(rpl); 2138 struct c4iw_listen_ep *ep = lookup_stid(t, stid); 2139 2140 PDBG("%s ep %p\n", __func__, ep); 2141 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); 2142 return 0; 2143 } 2144 2145 static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, 2146 struct cpl_pass_accept_req *req) 2147 { 2148 struct cpl_pass_accept_rpl *rpl; 2149 unsigned int mtu_idx; 2150 u64 opt0; 2151 u32 opt2; 2152 int wscale; 2153 struct cpl_t5_pass_accept_rpl *rpl5 = NULL; 2154 int win; 2155 2156 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2157 BUG_ON(skb_cloned(skb)); 2158 2159 skb_get(skb); 2160 rpl = cplhdr(skb); 2161 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { 2162 skb_trim(skb, roundup(sizeof(*rpl5), 16)); 2163 rpl5 = (void *)rpl; 2164 INIT_TP_WR(rpl5, ep->hwtid); 2165 } else { 2166 skb_trim(skb, sizeof(*rpl)); 2167 INIT_TP_WR(rpl, ep->hwtid); 2168 } 2169 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, 2170 ep->hwtid)); 2171 2172 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, 2173 enable_tcp_timestamps && req->tcpopt.tstamp, 2174 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1); 2175 wscale = compute_wscale(rcv_win); 2176 2177 /* 2178 * Specify the largest window that will fit in opt0. The 2179 * remainder will be specified in the rx_data_ack. 2180 */ 2181 win = ep->rcv_win >> 10; 2182 if (win > RCV_BUFSIZ_M) 2183 win = RCV_BUFSIZ_M; 2184 opt0 = (nocong ? NO_CONG(1) : 0) | 2185 KEEP_ALIVE_F | 2186 DELACK(1) | 2187 WND_SCALE_V(wscale) | 2188 MSS_IDX_V(mtu_idx) | 2189 L2T_IDX_V(ep->l2t->idx) | 2190 TX_CHAN_V(ep->tx_chan) | 2191 SMAC_SEL_V(ep->smac_idx) | 2192 DSCP(ep->tos >> 2) | 2193 ULP_MODE_V(ULP_MODE_TCPDDP) | 2194 RCV_BUFSIZ_V(win); 2195 opt2 = RX_CHANNEL_V(0) | 2196 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid); 2197 2198 if (enable_tcp_timestamps && req->tcpopt.tstamp) 2199 opt2 |= TSTAMPS_EN(1); 2200 if (enable_tcp_sack && req->tcpopt.sack) 2201 opt2 |= SACK_EN(1); 2202 if (wscale && enable_tcp_window_scaling) 2203 opt2 |= WND_SCALE_EN_F; 2204 if (enable_ecn) { 2205 const struct tcphdr *tcph; 2206 u32 hlen = ntohl(req->hdr_len); 2207 2208 tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) + 2209 G_IP_HDR_LEN(hlen); 2210 if (tcph->ece && tcph->cwr) 2211 opt2 |= CCTRL_ECN(1); 2212 } 2213 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { 2214 u32 isn = (prandom_u32() & ~7UL) - 1; 2215 opt2 |= T5_OPT_2_VALID_F; 2216 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); 2217 opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */ 2218 rpl5 = (void *)rpl; 2219 memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16)); 2220 if (peer2peer) 2221 isn += 4; 2222 rpl5->iss = cpu_to_be32(isn); 2223 PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss)); 2224 } 2225 2226 rpl->opt0 = cpu_to_be64(opt0); 2227 rpl->opt2 = cpu_to_be32(opt2); 2228 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 2229 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 2230 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 2231 2232 return; 2233 } 2234 2235 static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb) 2236 { 2237 PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid); 2238 BUG_ON(skb_cloned(skb)); 2239 skb_trim(skb, sizeof(struct cpl_tid_release)); 2240 release_tid(&dev->rdev, hwtid, skb); 2241 return; 2242 } 2243 2244 static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype, 2245 __u8 *local_ip, __u8 *peer_ip, 2246 __be16 *local_port, __be16 *peer_port) 2247 { 2248 int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len)); 2249 int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len)); 2250 struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len); 2251 struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len); 2252 struct tcphdr *tcp = (struct tcphdr *) 2253 ((u8 *)(req + 1) + eth_len + ip_len); 2254 2255 if (ip->version == 4) { 2256 PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__, 2257 ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source), 2258 ntohs(tcp->dest)); 2259 *iptype = 4; 2260 memcpy(peer_ip, &ip->saddr, 4); 2261 memcpy(local_ip, &ip->daddr, 4); 2262 } else { 2263 PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__, 2264 ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source), 2265 ntohs(tcp->dest)); 2266 *iptype = 6; 2267 memcpy(peer_ip, ip6->saddr.s6_addr, 16); 2268 memcpy(local_ip, ip6->daddr.s6_addr, 16); 2269 } 2270 *peer_port = tcp->source; 2271 *local_port = tcp->dest; 2272 2273 return; 2274 } 2275 2276 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) 2277 { 2278 struct c4iw_ep *child_ep = NULL, *parent_ep; 2279 struct cpl_pass_accept_req *req = cplhdr(skb); 2280 unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid)); 2281 struct tid_info *t = dev->rdev.lldi.tids; 2282 unsigned int hwtid = GET_TID(req); 2283 struct dst_entry *dst; 2284 __u8 local_ip[16], peer_ip[16]; 2285 __be16 local_port, peer_port; 2286 int err; 2287 u16 peer_mss = ntohs(req->tcpopt.mss); 2288 int iptype; 2289 unsigned short hdrs; 2290 2291 parent_ep = lookup_stid(t, stid); 2292 if (!parent_ep) { 2293 PDBG("%s connect request on invalid stid %d\n", __func__, stid); 2294 goto reject; 2295 } 2296 2297 if (state_read(&parent_ep->com) != LISTEN) { 2298 printk(KERN_ERR "%s - listening ep not in LISTEN\n", 2299 __func__); 2300 goto reject; 2301 } 2302 2303 get_4tuple(req, &iptype, local_ip, peer_ip, &local_port, &peer_port); 2304 2305 /* Find output route */ 2306 if (iptype == 4) { 2307 PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n" 2308 , __func__, parent_ep, hwtid, 2309 local_ip, peer_ip, ntohs(local_port), 2310 ntohs(peer_port), peer_mss); 2311 dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip, 2312 local_port, peer_port, 2313 GET_POPEN_TOS(ntohl(req->tos_stid))); 2314 } else { 2315 PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n" 2316 , __func__, parent_ep, hwtid, 2317 local_ip, peer_ip, ntohs(local_port), 2318 ntohs(peer_port), peer_mss); 2319 dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port, 2320 PASS_OPEN_TOS(ntohl(req->tos_stid)), 2321 ((struct sockaddr_in6 *) 2322 &parent_ep->com.local_addr)->sin6_scope_id); 2323 } 2324 if (!dst) { 2325 printk(KERN_ERR MOD "%s - failed to find dst entry!\n", 2326 __func__); 2327 goto reject; 2328 } 2329 2330 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); 2331 if (!child_ep) { 2332 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", 2333 __func__); 2334 dst_release(dst); 2335 goto reject; 2336 } 2337 2338 err = import_ep(child_ep, iptype, peer_ip, dst, dev, false); 2339 if (err) { 2340 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", 2341 __func__); 2342 dst_release(dst); 2343 kfree(child_ep); 2344 goto reject; 2345 } 2346 2347 hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) + 2348 ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0); 2349 if (peer_mss && child_ep->mtu > (peer_mss + hdrs)) 2350 child_ep->mtu = peer_mss + hdrs; 2351 2352 state_set(&child_ep->com, CONNECTING); 2353 child_ep->com.dev = dev; 2354 child_ep->com.cm_id = NULL; 2355 if (iptype == 4) { 2356 struct sockaddr_in *sin = (struct sockaddr_in *) 2357 &child_ep->com.local_addr; 2358 sin->sin_family = PF_INET; 2359 sin->sin_port = local_port; 2360 sin->sin_addr.s_addr = *(__be32 *)local_ip; 2361 sin = (struct sockaddr_in *)&child_ep->com.remote_addr; 2362 sin->sin_family = PF_INET; 2363 sin->sin_port = peer_port; 2364 sin->sin_addr.s_addr = *(__be32 *)peer_ip; 2365 } else { 2366 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) 2367 &child_ep->com.local_addr; 2368 sin6->sin6_family = PF_INET6; 2369 sin6->sin6_port = local_port; 2370 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); 2371 sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr; 2372 sin6->sin6_family = PF_INET6; 2373 sin6->sin6_port = peer_port; 2374 memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16); 2375 } 2376 c4iw_get_ep(&parent_ep->com); 2377 child_ep->parent_ep = parent_ep; 2378 child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid)); 2379 child_ep->dst = dst; 2380 child_ep->hwtid = hwtid; 2381 2382 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__, 2383 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid); 2384 2385 init_timer(&child_ep->timer); 2386 cxgb4_insert_tid(t, child_ep, hwtid); 2387 insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid); 2388 accept_cr(child_ep, skb, req); 2389 set_bit(PASS_ACCEPT_REQ, &child_ep->com.history); 2390 goto out; 2391 reject: 2392 reject_cr(dev, hwtid, skb); 2393 out: 2394 return 0; 2395 } 2396 2397 static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb) 2398 { 2399 struct c4iw_ep *ep; 2400 struct cpl_pass_establish *req = cplhdr(skb); 2401 struct tid_info *t = dev->rdev.lldi.tids; 2402 unsigned int tid = GET_TID(req); 2403 2404 ep = lookup_tid(t, tid); 2405 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2406 ep->snd_seq = be32_to_cpu(req->snd_isn); 2407 ep->rcv_seq = be32_to_cpu(req->rcv_isn); 2408 2409 PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid, 2410 ntohs(req->tcp_opt)); 2411 2412 set_emss(ep, ntohs(req->tcp_opt)); 2413 2414 dst_confirm(ep->dst); 2415 state_set(&ep->com, MPA_REQ_WAIT); 2416 start_ep_timer(ep); 2417 send_flowc(ep, skb); 2418 set_bit(PASS_ESTAB, &ep->com.history); 2419 2420 return 0; 2421 } 2422 2423 static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) 2424 { 2425 struct cpl_peer_close *hdr = cplhdr(skb); 2426 struct c4iw_ep *ep; 2427 struct c4iw_qp_attributes attrs; 2428 int disconnect = 1; 2429 int release = 0; 2430 struct tid_info *t = dev->rdev.lldi.tids; 2431 unsigned int tid = GET_TID(hdr); 2432 int ret; 2433 2434 ep = lookup_tid(t, tid); 2435 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2436 dst_confirm(ep->dst); 2437 2438 set_bit(PEER_CLOSE, &ep->com.history); 2439 mutex_lock(&ep->com.mutex); 2440 switch (ep->com.state) { 2441 case MPA_REQ_WAIT: 2442 __state_set(&ep->com, CLOSING); 2443 break; 2444 case MPA_REQ_SENT: 2445 __state_set(&ep->com, CLOSING); 2446 connect_reply_upcall(ep, -ECONNRESET); 2447 break; 2448 case MPA_REQ_RCVD: 2449 2450 /* 2451 * We're gonna mark this puppy DEAD, but keep 2452 * the reference on it until the ULP accepts or 2453 * rejects the CR. Also wake up anyone waiting 2454 * in rdma connection migration (see c4iw_accept_cr()). 2455 */ 2456 __state_set(&ep->com, CLOSING); 2457 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 2458 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 2459 break; 2460 case MPA_REP_SENT: 2461 __state_set(&ep->com, CLOSING); 2462 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 2463 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 2464 break; 2465 case FPDU_MODE: 2466 start_ep_timer(ep); 2467 __state_set(&ep->com, CLOSING); 2468 attrs.next_state = C4IW_QP_STATE_CLOSING; 2469 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2470 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2471 if (ret != -ECONNRESET) { 2472 peer_close_upcall(ep); 2473 disconnect = 1; 2474 } 2475 break; 2476 case ABORTING: 2477 disconnect = 0; 2478 break; 2479 case CLOSING: 2480 __state_set(&ep->com, MORIBUND); 2481 disconnect = 0; 2482 break; 2483 case MORIBUND: 2484 (void)stop_ep_timer(ep); 2485 if (ep->com.cm_id && ep->com.qp) { 2486 attrs.next_state = C4IW_QP_STATE_IDLE; 2487 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2488 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2489 } 2490 close_complete_upcall(ep, 0); 2491 __state_set(&ep->com, DEAD); 2492 release = 1; 2493 disconnect = 0; 2494 break; 2495 case DEAD: 2496 disconnect = 0; 2497 break; 2498 default: 2499 BUG_ON(1); 2500 } 2501 mutex_unlock(&ep->com.mutex); 2502 if (disconnect) 2503 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 2504 if (release) 2505 release_ep_resources(ep); 2506 return 0; 2507 } 2508 2509 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) 2510 { 2511 struct cpl_abort_req_rss *req = cplhdr(skb); 2512 struct c4iw_ep *ep; 2513 struct cpl_abort_rpl *rpl; 2514 struct sk_buff *rpl_skb; 2515 struct c4iw_qp_attributes attrs; 2516 int ret; 2517 int release = 0; 2518 struct tid_info *t = dev->rdev.lldi.tids; 2519 unsigned int tid = GET_TID(req); 2520 2521 ep = lookup_tid(t, tid); 2522 if (is_neg_adv(req->status)) { 2523 dev_warn(&dev->rdev.lldi.pdev->dev, 2524 "Negative advice on abort - tid %u status %d (%s)\n", 2525 ep->hwtid, req->status, neg_adv_str(req->status)); 2526 return 0; 2527 } 2528 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, 2529 ep->com.state); 2530 set_bit(PEER_ABORT, &ep->com.history); 2531 2532 /* 2533 * Wake up any threads in rdma_init() or rdma_fini(). 2534 * However, this is not needed if com state is just 2535 * MPA_REQ_SENT 2536 */ 2537 if (ep->com.state != MPA_REQ_SENT) 2538 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 2539 2540 mutex_lock(&ep->com.mutex); 2541 switch (ep->com.state) { 2542 case CONNECTING: 2543 break; 2544 case MPA_REQ_WAIT: 2545 (void)stop_ep_timer(ep); 2546 break; 2547 case MPA_REQ_SENT: 2548 (void)stop_ep_timer(ep); 2549 if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1)) 2550 connect_reply_upcall(ep, -ECONNRESET); 2551 else { 2552 /* 2553 * we just don't send notification upwards because we 2554 * want to retry with mpa_v1 without upper layers even 2555 * knowing it. 2556 * 2557 * do some housekeeping so as to re-initiate the 2558 * connection 2559 */ 2560 PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__, 2561 mpa_rev); 2562 ep->retry_with_mpa_v1 = 1; 2563 } 2564 break; 2565 case MPA_REP_SENT: 2566 break; 2567 case MPA_REQ_RCVD: 2568 break; 2569 case MORIBUND: 2570 case CLOSING: 2571 stop_ep_timer(ep); 2572 /*FALLTHROUGH*/ 2573 case FPDU_MODE: 2574 if (ep->com.cm_id && ep->com.qp) { 2575 attrs.next_state = C4IW_QP_STATE_ERROR; 2576 ret = c4iw_modify_qp(ep->com.qp->rhp, 2577 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 2578 &attrs, 1); 2579 if (ret) 2580 printk(KERN_ERR MOD 2581 "%s - qp <- error failed!\n", 2582 __func__); 2583 } 2584 peer_abort_upcall(ep); 2585 break; 2586 case ABORTING: 2587 break; 2588 case DEAD: 2589 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); 2590 mutex_unlock(&ep->com.mutex); 2591 return 0; 2592 default: 2593 BUG_ON(1); 2594 break; 2595 } 2596 dst_confirm(ep->dst); 2597 if (ep->com.state != ABORTING) { 2598 __state_set(&ep->com, DEAD); 2599 /* we don't release if we want to retry with mpa_v1 */ 2600 if (!ep->retry_with_mpa_v1) 2601 release = 1; 2602 } 2603 mutex_unlock(&ep->com.mutex); 2604 2605 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); 2606 if (!rpl_skb) { 2607 printk(KERN_ERR MOD "%s - cannot allocate skb!\n", 2608 __func__); 2609 release = 1; 2610 goto out; 2611 } 2612 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 2613 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); 2614 INIT_TP_WR(rpl, ep->hwtid); 2615 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); 2616 rpl->cmd = CPL_ABORT_NO_RST; 2617 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); 2618 out: 2619 if (release) 2620 release_ep_resources(ep); 2621 else if (ep->retry_with_mpa_v1) { 2622 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid); 2623 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); 2624 dst_release(ep->dst); 2625 cxgb4_l2t_release(ep->l2t); 2626 c4iw_reconnect(ep); 2627 } 2628 2629 return 0; 2630 } 2631 2632 static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2633 { 2634 struct c4iw_ep *ep; 2635 struct c4iw_qp_attributes attrs; 2636 struct cpl_close_con_rpl *rpl = cplhdr(skb); 2637 int release = 0; 2638 struct tid_info *t = dev->rdev.lldi.tids; 2639 unsigned int tid = GET_TID(rpl); 2640 2641 ep = lookup_tid(t, tid); 2642 2643 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2644 BUG_ON(!ep); 2645 2646 /* The cm_id may be null if we failed to connect */ 2647 mutex_lock(&ep->com.mutex); 2648 switch (ep->com.state) { 2649 case CLOSING: 2650 __state_set(&ep->com, MORIBUND); 2651 break; 2652 case MORIBUND: 2653 (void)stop_ep_timer(ep); 2654 if ((ep->com.cm_id) && (ep->com.qp)) { 2655 attrs.next_state = C4IW_QP_STATE_IDLE; 2656 c4iw_modify_qp(ep->com.qp->rhp, 2657 ep->com.qp, 2658 C4IW_QP_ATTR_NEXT_STATE, 2659 &attrs, 1); 2660 } 2661 close_complete_upcall(ep, 0); 2662 __state_set(&ep->com, DEAD); 2663 release = 1; 2664 break; 2665 case ABORTING: 2666 case DEAD: 2667 break; 2668 default: 2669 BUG_ON(1); 2670 break; 2671 } 2672 mutex_unlock(&ep->com.mutex); 2673 if (release) 2674 release_ep_resources(ep); 2675 return 0; 2676 } 2677 2678 static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) 2679 { 2680 struct cpl_rdma_terminate *rpl = cplhdr(skb); 2681 struct tid_info *t = dev->rdev.lldi.tids; 2682 unsigned int tid = GET_TID(rpl); 2683 struct c4iw_ep *ep; 2684 struct c4iw_qp_attributes attrs; 2685 2686 ep = lookup_tid(t, tid); 2687 BUG_ON(!ep); 2688 2689 if (ep && ep->com.qp) { 2690 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, 2691 ep->com.qp->wq.sq.qid); 2692 attrs.next_state = C4IW_QP_STATE_TERMINATE; 2693 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2694 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2695 } else 2696 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); 2697 2698 return 0; 2699 } 2700 2701 /* 2702 * Upcall from the adapter indicating data has been transmitted. 2703 * For us its just the single MPA request or reply. We can now free 2704 * the skb holding the mpa message. 2705 */ 2706 static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) 2707 { 2708 struct c4iw_ep *ep; 2709 struct cpl_fw4_ack *hdr = cplhdr(skb); 2710 u8 credits = hdr->credits; 2711 unsigned int tid = GET_TID(hdr); 2712 struct tid_info *t = dev->rdev.lldi.tids; 2713 2714 2715 ep = lookup_tid(t, tid); 2716 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); 2717 if (credits == 0) { 2718 PDBG("%s 0 credit ack ep %p tid %u state %u\n", 2719 __func__, ep, ep->hwtid, state_read(&ep->com)); 2720 return 0; 2721 } 2722 2723 dst_confirm(ep->dst); 2724 if (ep->mpa_skb) { 2725 PDBG("%s last streaming msg ack ep %p tid %u state %u " 2726 "initiator %u freeing skb\n", __func__, ep, ep->hwtid, 2727 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0); 2728 kfree_skb(ep->mpa_skb); 2729 ep->mpa_skb = NULL; 2730 } 2731 return 0; 2732 } 2733 2734 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 2735 { 2736 int err = 0; 2737 int disconnect = 0; 2738 struct c4iw_ep *ep = to_ep(cm_id); 2739 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2740 2741 mutex_lock(&ep->com.mutex); 2742 if (ep->com.state == DEAD) { 2743 mutex_unlock(&ep->com.mutex); 2744 c4iw_put_ep(&ep->com); 2745 return -ECONNRESET; 2746 } 2747 set_bit(ULP_REJECT, &ep->com.history); 2748 BUG_ON(ep->com.state != MPA_REQ_RCVD); 2749 if (mpa_rev == 0) 2750 abort_connection(ep, NULL, GFP_KERNEL); 2751 else { 2752 err = send_mpa_reject(ep, pdata, pdata_len); 2753 disconnect = 1; 2754 } 2755 mutex_unlock(&ep->com.mutex); 2756 if (disconnect) 2757 err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 2758 c4iw_put_ep(&ep->com); 2759 return 0; 2760 } 2761 2762 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 2763 { 2764 int err; 2765 struct c4iw_qp_attributes attrs; 2766 enum c4iw_qp_attr_mask mask; 2767 struct c4iw_ep *ep = to_ep(cm_id); 2768 struct c4iw_dev *h = to_c4iw_dev(cm_id->device); 2769 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); 2770 2771 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2772 2773 mutex_lock(&ep->com.mutex); 2774 if (ep->com.state == DEAD) { 2775 err = -ECONNRESET; 2776 goto err; 2777 } 2778 2779 BUG_ON(ep->com.state != MPA_REQ_RCVD); 2780 BUG_ON(!qp); 2781 2782 set_bit(ULP_ACCEPT, &ep->com.history); 2783 if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) || 2784 (conn_param->ird > cur_max_read_depth(ep->com.dev))) { 2785 abort_connection(ep, NULL, GFP_KERNEL); 2786 err = -EINVAL; 2787 goto err; 2788 } 2789 2790 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 2791 if (conn_param->ord > ep->ird) { 2792 if (RELAXED_IRD_NEGOTIATION) { 2793 ep->ord = ep->ird; 2794 } else { 2795 ep->ird = conn_param->ird; 2796 ep->ord = conn_param->ord; 2797 send_mpa_reject(ep, conn_param->private_data, 2798 conn_param->private_data_len); 2799 abort_connection(ep, NULL, GFP_KERNEL); 2800 err = -ENOMEM; 2801 goto err; 2802 } 2803 } 2804 if (conn_param->ird < ep->ord) { 2805 if (RELAXED_IRD_NEGOTIATION && 2806 ep->ord <= h->rdev.lldi.max_ordird_qp) { 2807 conn_param->ird = ep->ord; 2808 } else { 2809 abort_connection(ep, NULL, GFP_KERNEL); 2810 err = -ENOMEM; 2811 goto err; 2812 } 2813 } 2814 } 2815 ep->ird = conn_param->ird; 2816 ep->ord = conn_param->ord; 2817 2818 if (ep->mpa_attr.version == 1) { 2819 if (peer2peer && ep->ird == 0) 2820 ep->ird = 1; 2821 } else { 2822 if (peer2peer && 2823 (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) && 2824 (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ord == 0) 2825 ep->ird = 1; 2826 } 2827 2828 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); 2829 2830 cm_id->add_ref(cm_id); 2831 ep->com.cm_id = cm_id; 2832 ep->com.qp = qp; 2833 ref_qp(ep); 2834 2835 /* bind QP to EP and move to RTS */ 2836 attrs.mpa_attr = ep->mpa_attr; 2837 attrs.max_ird = ep->ird; 2838 attrs.max_ord = ep->ord; 2839 attrs.llp_stream_handle = ep; 2840 attrs.next_state = C4IW_QP_STATE_RTS; 2841 2842 /* bind QP and TID with INIT_WR */ 2843 mask = C4IW_QP_ATTR_NEXT_STATE | 2844 C4IW_QP_ATTR_LLP_STREAM_HANDLE | 2845 C4IW_QP_ATTR_MPA_ATTR | 2846 C4IW_QP_ATTR_MAX_IRD | 2847 C4IW_QP_ATTR_MAX_ORD; 2848 2849 err = c4iw_modify_qp(ep->com.qp->rhp, 2850 ep->com.qp, mask, &attrs, 1); 2851 if (err) 2852 goto err1; 2853 err = send_mpa_reply(ep, conn_param->private_data, 2854 conn_param->private_data_len); 2855 if (err) 2856 goto err1; 2857 2858 __state_set(&ep->com, FPDU_MODE); 2859 established_upcall(ep); 2860 mutex_unlock(&ep->com.mutex); 2861 c4iw_put_ep(&ep->com); 2862 return 0; 2863 err1: 2864 ep->com.cm_id = NULL; 2865 abort_connection(ep, NULL, GFP_KERNEL); 2866 cm_id->rem_ref(cm_id); 2867 err: 2868 mutex_unlock(&ep->com.mutex); 2869 c4iw_put_ep(&ep->com); 2870 return err; 2871 } 2872 2873 static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) 2874 { 2875 struct in_device *ind; 2876 int found = 0; 2877 struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr; 2878 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr; 2879 2880 ind = in_dev_get(dev->rdev.lldi.ports[0]); 2881 if (!ind) 2882 return -EADDRNOTAVAIL; 2883 for_primary_ifa(ind) { 2884 laddr->sin_addr.s_addr = ifa->ifa_address; 2885 raddr->sin_addr.s_addr = ifa->ifa_address; 2886 found = 1; 2887 break; 2888 } 2889 endfor_ifa(ind); 2890 in_dev_put(ind); 2891 return found ? 0 : -EADDRNOTAVAIL; 2892 } 2893 2894 static int get_lladdr(struct net_device *dev, struct in6_addr *addr, 2895 unsigned char banned_flags) 2896 { 2897 struct inet6_dev *idev; 2898 int err = -EADDRNOTAVAIL; 2899 2900 rcu_read_lock(); 2901 idev = __in6_dev_get(dev); 2902 if (idev != NULL) { 2903 struct inet6_ifaddr *ifp; 2904 2905 read_lock_bh(&idev->lock); 2906 list_for_each_entry(ifp, &idev->addr_list, if_list) { 2907 if (ifp->scope == IFA_LINK && 2908 !(ifp->flags & banned_flags)) { 2909 memcpy(addr, &ifp->addr, 16); 2910 err = 0; 2911 break; 2912 } 2913 } 2914 read_unlock_bh(&idev->lock); 2915 } 2916 rcu_read_unlock(); 2917 return err; 2918 } 2919 2920 static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) 2921 { 2922 struct in6_addr uninitialized_var(addr); 2923 struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->local_addr; 2924 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->remote_addr; 2925 2926 if (get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) { 2927 memcpy(la6->sin6_addr.s6_addr, &addr, 16); 2928 memcpy(ra6->sin6_addr.s6_addr, &addr, 16); 2929 return 0; 2930 } 2931 return -EADDRNOTAVAIL; 2932 } 2933 2934 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 2935 { 2936 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 2937 struct c4iw_ep *ep; 2938 int err = 0; 2939 struct sockaddr_in *laddr; 2940 struct sockaddr_in *raddr; 2941 struct sockaddr_in6 *laddr6; 2942 struct sockaddr_in6 *raddr6; 2943 struct iwpm_dev_data pm_reg_msg; 2944 struct iwpm_sa_data pm_msg; 2945 __u8 *ra; 2946 int iptype; 2947 int iwpm_err = 0; 2948 2949 if ((conn_param->ord > cur_max_read_depth(dev)) || 2950 (conn_param->ird > cur_max_read_depth(dev))) { 2951 err = -EINVAL; 2952 goto out; 2953 } 2954 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 2955 if (!ep) { 2956 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 2957 err = -ENOMEM; 2958 goto out; 2959 } 2960 init_timer(&ep->timer); 2961 ep->plen = conn_param->private_data_len; 2962 if (ep->plen) 2963 memcpy(ep->mpa_pkt + sizeof(struct mpa_message), 2964 conn_param->private_data, ep->plen); 2965 ep->ird = conn_param->ird; 2966 ep->ord = conn_param->ord; 2967 2968 if (peer2peer && ep->ord == 0) 2969 ep->ord = 1; 2970 2971 cm_id->add_ref(cm_id); 2972 ep->com.dev = dev; 2973 ep->com.cm_id = cm_id; 2974 ep->com.qp = get_qhp(dev, conn_param->qpn); 2975 if (!ep->com.qp) { 2976 PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn); 2977 err = -EINVAL; 2978 goto fail1; 2979 } 2980 ref_qp(ep); 2981 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, 2982 ep->com.qp, cm_id); 2983 2984 /* 2985 * Allocate an active TID to initiate a TCP connection. 2986 */ 2987 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); 2988 if (ep->atid == -1) { 2989 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); 2990 err = -ENOMEM; 2991 goto fail1; 2992 } 2993 insert_handle(dev, &dev->atid_idr, ep, ep->atid); 2994 2995 memcpy(&ep->com.local_addr, &cm_id->local_addr, 2996 sizeof(ep->com.local_addr)); 2997 memcpy(&ep->com.remote_addr, &cm_id->remote_addr, 2998 sizeof(ep->com.remote_addr)); 2999 3000 /* No port mapper available, go with the specified peer information */ 3001 memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr, 3002 sizeof(ep->com.mapped_local_addr)); 3003 memcpy(&ep->com.mapped_remote_addr, &cm_id->remote_addr, 3004 sizeof(ep->com.mapped_remote_addr)); 3005 3006 c4iw_form_reg_msg(dev, &pm_reg_msg); 3007 iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW); 3008 if (iwpm_err) { 3009 PDBG("%s: Port Mapper reg pid fail (err = %d).\n", 3010 __func__, iwpm_err); 3011 } 3012 if (iwpm_valid_pid() && !iwpm_err) { 3013 c4iw_form_pm_msg(ep, &pm_msg); 3014 iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_C4IW); 3015 if (iwpm_err) 3016 PDBG("%s: Port Mapper query fail (err = %d).\n", 3017 __func__, iwpm_err); 3018 else 3019 c4iw_record_pm_msg(ep, &pm_msg); 3020 } 3021 if (iwpm_create_mapinfo(&ep->com.local_addr, 3022 &ep->com.mapped_local_addr, RDMA_NL_C4IW)) { 3023 iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW); 3024 err = -ENOMEM; 3025 goto fail1; 3026 } 3027 print_addr(&ep->com, __func__, "add_query/create_mapinfo"); 3028 set_bit(RELEASE_MAPINFO, &ep->com.flags); 3029 3030 laddr = (struct sockaddr_in *)&ep->com.mapped_local_addr; 3031 raddr = (struct sockaddr_in *)&ep->com.mapped_remote_addr; 3032 laddr6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr; 3033 raddr6 = (struct sockaddr_in6 *) &ep->com.mapped_remote_addr; 3034 3035 if (cm_id->remote_addr.ss_family == AF_INET) { 3036 iptype = 4; 3037 ra = (__u8 *)&raddr->sin_addr; 3038 3039 /* 3040 * Handle loopback requests to INADDR_ANY. 3041 */ 3042 if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) { 3043 err = pick_local_ipaddrs(dev, cm_id); 3044 if (err) 3045 goto fail1; 3046 } 3047 3048 /* find a route */ 3049 PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n", 3050 __func__, &laddr->sin_addr, ntohs(laddr->sin_port), 3051 ra, ntohs(raddr->sin_port)); 3052 ep->dst = find_route(dev, laddr->sin_addr.s_addr, 3053 raddr->sin_addr.s_addr, laddr->sin_port, 3054 raddr->sin_port, 0); 3055 } else { 3056 iptype = 6; 3057 ra = (__u8 *)&raddr6->sin6_addr; 3058 3059 /* 3060 * Handle loopback requests to INADDR_ANY. 3061 */ 3062 if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) { 3063 err = pick_local_ip6addrs(dev, cm_id); 3064 if (err) 3065 goto fail1; 3066 } 3067 3068 /* find a route */ 3069 PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n", 3070 __func__, laddr6->sin6_addr.s6_addr, 3071 ntohs(laddr6->sin6_port), 3072 raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port)); 3073 ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr, 3074 raddr6->sin6_addr.s6_addr, 3075 laddr6->sin6_port, raddr6->sin6_port, 0, 3076 raddr6->sin6_scope_id); 3077 } 3078 if (!ep->dst) { 3079 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); 3080 err = -EHOSTUNREACH; 3081 goto fail2; 3082 } 3083 3084 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true); 3085 if (err) { 3086 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 3087 goto fail3; 3088 } 3089 3090 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 3091 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 3092 ep->l2t->idx); 3093 3094 state_set(&ep->com, CONNECTING); 3095 ep->tos = 0; 3096 3097 /* send connect request to rnic */ 3098 err = send_connect(ep); 3099 if (!err) 3100 goto out; 3101 3102 cxgb4_l2t_release(ep->l2t); 3103 fail3: 3104 dst_release(ep->dst); 3105 fail2: 3106 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 3107 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 3108 fail1: 3109 cm_id->rem_ref(cm_id); 3110 c4iw_put_ep(&ep->com); 3111 out: 3112 return err; 3113 } 3114 3115 static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) 3116 { 3117 int err; 3118 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) 3119 &ep->com.mapped_local_addr; 3120 3121 c4iw_init_wr_wait(&ep->com.wr_wait); 3122 err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0], 3123 ep->stid, &sin6->sin6_addr, 3124 sin6->sin6_port, 3125 ep->com.dev->rdev.lldi.rxq_ids[0]); 3126 if (!err) 3127 err = c4iw_wait_for_reply(&ep->com.dev->rdev, 3128 &ep->com.wr_wait, 3129 0, 0, __func__); 3130 else if (err > 0) 3131 err = net_xmit_errno(err); 3132 if (err) 3133 pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n", 3134 err, ep->stid, 3135 sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port)); 3136 return err; 3137 } 3138 3139 static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) 3140 { 3141 int err; 3142 struct sockaddr_in *sin = (struct sockaddr_in *) 3143 &ep->com.mapped_local_addr; 3144 3145 if (dev->rdev.lldi.enable_fw_ofld_conn) { 3146 do { 3147 err = cxgb4_create_server_filter( 3148 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3149 sin->sin_addr.s_addr, sin->sin_port, 0, 3150 ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0); 3151 if (err == -EBUSY) { 3152 set_current_state(TASK_UNINTERRUPTIBLE); 3153 schedule_timeout(usecs_to_jiffies(100)); 3154 } 3155 } while (err == -EBUSY); 3156 } else { 3157 c4iw_init_wr_wait(&ep->com.wr_wait); 3158 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], 3159 ep->stid, sin->sin_addr.s_addr, sin->sin_port, 3160 0, ep->com.dev->rdev.lldi.rxq_ids[0]); 3161 if (!err) 3162 err = c4iw_wait_for_reply(&ep->com.dev->rdev, 3163 &ep->com.wr_wait, 3164 0, 0, __func__); 3165 else if (err > 0) 3166 err = net_xmit_errno(err); 3167 } 3168 if (err) 3169 pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n" 3170 , err, ep->stid, 3171 &sin->sin_addr, ntohs(sin->sin_port)); 3172 return err; 3173 } 3174 3175 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) 3176 { 3177 int err = 0; 3178 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 3179 struct c4iw_listen_ep *ep; 3180 struct iwpm_dev_data pm_reg_msg; 3181 struct iwpm_sa_data pm_msg; 3182 int iwpm_err = 0; 3183 3184 might_sleep(); 3185 3186 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 3187 if (!ep) { 3188 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 3189 err = -ENOMEM; 3190 goto fail1; 3191 } 3192 PDBG("%s ep %p\n", __func__, ep); 3193 cm_id->add_ref(cm_id); 3194 ep->com.cm_id = cm_id; 3195 ep->com.dev = dev; 3196 ep->backlog = backlog; 3197 memcpy(&ep->com.local_addr, &cm_id->local_addr, 3198 sizeof(ep->com.local_addr)); 3199 3200 /* 3201 * Allocate a server TID. 3202 */ 3203 if (dev->rdev.lldi.enable_fw_ofld_conn && 3204 ep->com.local_addr.ss_family == AF_INET) 3205 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, 3206 cm_id->local_addr.ss_family, ep); 3207 else 3208 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, 3209 cm_id->local_addr.ss_family, ep); 3210 3211 if (ep->stid == -1) { 3212 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__); 3213 err = -ENOMEM; 3214 goto fail2; 3215 } 3216 insert_handle(dev, &dev->stid_idr, ep, ep->stid); 3217 3218 /* No port mapper available, go with the specified info */ 3219 memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr, 3220 sizeof(ep->com.mapped_local_addr)); 3221 3222 c4iw_form_reg_msg(dev, &pm_reg_msg); 3223 iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW); 3224 if (iwpm_err) { 3225 PDBG("%s: Port Mapper reg pid fail (err = %d).\n", 3226 __func__, iwpm_err); 3227 } 3228 if (iwpm_valid_pid() && !iwpm_err) { 3229 memcpy(&pm_msg.loc_addr, &ep->com.local_addr, 3230 sizeof(ep->com.local_addr)); 3231 iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_C4IW); 3232 if (iwpm_err) 3233 PDBG("%s: Port Mapper query fail (err = %d).\n", 3234 __func__, iwpm_err); 3235 else 3236 memcpy(&ep->com.mapped_local_addr, 3237 &pm_msg.mapped_loc_addr, 3238 sizeof(ep->com.mapped_local_addr)); 3239 } 3240 if (iwpm_create_mapinfo(&ep->com.local_addr, 3241 &ep->com.mapped_local_addr, RDMA_NL_C4IW)) { 3242 err = -ENOMEM; 3243 goto fail3; 3244 } 3245 print_addr(&ep->com, __func__, "add_mapping/create_mapinfo"); 3246 3247 set_bit(RELEASE_MAPINFO, &ep->com.flags); 3248 state_set(&ep->com, LISTEN); 3249 if (ep->com.local_addr.ss_family == AF_INET) 3250 err = create_server4(dev, ep); 3251 else 3252 err = create_server6(dev, ep); 3253 if (!err) { 3254 cm_id->provider_data = ep; 3255 goto out; 3256 } 3257 3258 fail3: 3259 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, 3260 ep->com.local_addr.ss_family); 3261 fail2: 3262 cm_id->rem_ref(cm_id); 3263 c4iw_put_ep(&ep->com); 3264 fail1: 3265 out: 3266 return err; 3267 } 3268 3269 int c4iw_destroy_listen(struct iw_cm_id *cm_id) 3270 { 3271 int err; 3272 struct c4iw_listen_ep *ep = to_listen_ep(cm_id); 3273 3274 PDBG("%s ep %p\n", __func__, ep); 3275 3276 might_sleep(); 3277 state_set(&ep->com, DEAD); 3278 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn && 3279 ep->com.local_addr.ss_family == AF_INET) { 3280 err = cxgb4_remove_server_filter( 3281 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3282 ep->com.dev->rdev.lldi.rxq_ids[0], 0); 3283 } else { 3284 c4iw_init_wr_wait(&ep->com.wr_wait); 3285 err = cxgb4_remove_server( 3286 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3287 ep->com.dev->rdev.lldi.rxq_ids[0], 0); 3288 if (err) 3289 goto done; 3290 err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 3291 0, 0, __func__); 3292 } 3293 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid); 3294 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, 3295 ep->com.local_addr.ss_family); 3296 done: 3297 cm_id->rem_ref(cm_id); 3298 c4iw_put_ep(&ep->com); 3299 return err; 3300 } 3301 3302 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) 3303 { 3304 int ret = 0; 3305 int close = 0; 3306 int fatal = 0; 3307 struct c4iw_rdev *rdev; 3308 3309 mutex_lock(&ep->com.mutex); 3310 3311 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, 3312 states[ep->com.state], abrupt); 3313 3314 rdev = &ep->com.dev->rdev; 3315 if (c4iw_fatal_error(rdev)) { 3316 fatal = 1; 3317 close_complete_upcall(ep, -EIO); 3318 ep->com.state = DEAD; 3319 } 3320 switch (ep->com.state) { 3321 case MPA_REQ_WAIT: 3322 case MPA_REQ_SENT: 3323 case MPA_REQ_RCVD: 3324 case MPA_REP_SENT: 3325 case FPDU_MODE: 3326 close = 1; 3327 if (abrupt) 3328 ep->com.state = ABORTING; 3329 else { 3330 ep->com.state = CLOSING; 3331 start_ep_timer(ep); 3332 } 3333 set_bit(CLOSE_SENT, &ep->com.flags); 3334 break; 3335 case CLOSING: 3336 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { 3337 close = 1; 3338 if (abrupt) { 3339 (void)stop_ep_timer(ep); 3340 ep->com.state = ABORTING; 3341 } else 3342 ep->com.state = MORIBUND; 3343 } 3344 break; 3345 case MORIBUND: 3346 case ABORTING: 3347 case DEAD: 3348 PDBG("%s ignoring disconnect ep %p state %u\n", 3349 __func__, ep, ep->com.state); 3350 break; 3351 default: 3352 BUG(); 3353 break; 3354 } 3355 3356 if (close) { 3357 if (abrupt) { 3358 set_bit(EP_DISC_ABORT, &ep->com.history); 3359 close_complete_upcall(ep, -ECONNRESET); 3360 ret = send_abort(ep, NULL, gfp); 3361 } else { 3362 set_bit(EP_DISC_CLOSE, &ep->com.history); 3363 ret = send_halfclose(ep, gfp); 3364 } 3365 if (ret) 3366 fatal = 1; 3367 } 3368 mutex_unlock(&ep->com.mutex); 3369 if (fatal) 3370 release_ep_resources(ep); 3371 return ret; 3372 } 3373 3374 static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 3375 struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 3376 { 3377 struct c4iw_ep *ep; 3378 int atid = be32_to_cpu(req->tid); 3379 3380 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, 3381 (__force u32) req->tid); 3382 if (!ep) 3383 return; 3384 3385 switch (req->retval) { 3386 case FW_ENOMEM: 3387 set_bit(ACT_RETRY_NOMEM, &ep->com.history); 3388 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 3389 send_fw_act_open_req(ep, atid); 3390 return; 3391 } 3392 case FW_EADDRINUSE: 3393 set_bit(ACT_RETRY_INUSE, &ep->com.history); 3394 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 3395 send_fw_act_open_req(ep, atid); 3396 return; 3397 } 3398 break; 3399 default: 3400 pr_info("%s unexpected ofld conn wr retval %d\n", 3401 __func__, req->retval); 3402 break; 3403 } 3404 pr_err("active ofld_connect_wr failure %d atid %d\n", 3405 req->retval, atid); 3406 mutex_lock(&dev->rdev.stats.lock); 3407 dev->rdev.stats.act_ofld_conn_fails++; 3408 mutex_unlock(&dev->rdev.stats.lock); 3409 connect_reply_upcall(ep, status2errno(req->retval)); 3410 state_set(&ep->com, DEAD); 3411 remove_handle(dev, &dev->atid_idr, atid); 3412 cxgb4_free_atid(dev->rdev.lldi.tids, atid); 3413 dst_release(ep->dst); 3414 cxgb4_l2t_release(ep->l2t); 3415 c4iw_put_ep(&ep->com); 3416 } 3417 3418 static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 3419 struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 3420 { 3421 struct sk_buff *rpl_skb; 3422 struct cpl_pass_accept_req *cpl; 3423 int ret; 3424 3425 rpl_skb = (struct sk_buff *)(unsigned long)req->cookie; 3426 BUG_ON(!rpl_skb); 3427 if (req->retval) { 3428 PDBG("%s passive open failure %d\n", __func__, req->retval); 3429 mutex_lock(&dev->rdev.stats.lock); 3430 dev->rdev.stats.pas_ofld_conn_fails++; 3431 mutex_unlock(&dev->rdev.stats.lock); 3432 kfree_skb(rpl_skb); 3433 } else { 3434 cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb); 3435 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 3436 (__force u32) htonl( 3437 (__force u32) req->tid))); 3438 ret = pass_accept_req(dev, rpl_skb); 3439 if (!ret) 3440 kfree_skb(rpl_skb); 3441 } 3442 return; 3443 } 3444 3445 static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) 3446 { 3447 struct cpl_fw6_msg *rpl = cplhdr(skb); 3448 struct cpl_fw6_msg_ofld_connection_wr_rpl *req; 3449 3450 switch (rpl->type) { 3451 case FW6_TYPE_CQE: 3452 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); 3453 break; 3454 case FW6_TYPE_OFLD_CONNECTION_WR_RPL: 3455 req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data; 3456 switch (req->t_state) { 3457 case TCP_SYN_SENT: 3458 active_ofld_conn_reply(dev, skb, req); 3459 break; 3460 case TCP_SYN_RECV: 3461 passive_ofld_conn_reply(dev, skb, req); 3462 break; 3463 default: 3464 pr_err("%s unexpected ofld conn wr state %d\n", 3465 __func__, req->t_state); 3466 break; 3467 } 3468 break; 3469 } 3470 return 0; 3471 } 3472 3473 static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos) 3474 { 3475 u32 l2info; 3476 u16 vlantag, len, hdr_len, eth_hdr_len; 3477 u8 intf; 3478 struct cpl_rx_pkt *cpl = cplhdr(skb); 3479 struct cpl_pass_accept_req *req; 3480 struct tcp_options_received tmp_opt; 3481 struct c4iw_dev *dev; 3482 3483 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); 3484 /* Store values from cpl_rx_pkt in temporary location. */ 3485 vlantag = (__force u16) cpl->vlan; 3486 len = (__force u16) cpl->len; 3487 l2info = (__force u32) cpl->l2info; 3488 hdr_len = (__force u16) cpl->hdr_len; 3489 intf = cpl->iff; 3490 3491 __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header)); 3492 3493 /* 3494 * We need to parse the TCP options from SYN packet. 3495 * to generate cpl_pass_accept_req. 3496 */ 3497 memset(&tmp_opt, 0, sizeof(tmp_opt)); 3498 tcp_clear_options(&tmp_opt); 3499 tcp_parse_options(skb, &tmp_opt, 0, NULL); 3500 3501 req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req)); 3502 memset(req, 0, sizeof(*req)); 3503 req->l2info = cpu_to_be16(V_SYN_INTF(intf) | 3504 V_SYN_MAC_IDX(G_RX_MACIDX( 3505 (__force int) htonl(l2info))) | 3506 F_SYN_XACT_MATCH); 3507 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ? 3508 G_RX_ETHHDR_LEN((__force int) htonl(l2info)) : 3509 G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info)); 3510 req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN( 3511 (__force int) htonl(l2info))) | 3512 V_TCP_HDR_LEN(G_RX_TCPHDR_LEN( 3513 (__force int) htons(hdr_len))) | 3514 V_IP_HDR_LEN(G_RX_IPHDR_LEN( 3515 (__force int) htons(hdr_len))) | 3516 V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len))); 3517 req->vlan = (__force __be16) vlantag; 3518 req->len = (__force __be16) len; 3519 req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) | 3520 PASS_OPEN_TOS(tos)); 3521 req->tcpopt.mss = htons(tmp_opt.mss_clamp); 3522 if (tmp_opt.wscale_ok) 3523 req->tcpopt.wsf = tmp_opt.snd_wscale; 3524 req->tcpopt.tstamp = tmp_opt.saw_tstamp; 3525 if (tmp_opt.sack_ok) 3526 req->tcpopt.sack = 1; 3527 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0)); 3528 return; 3529 } 3530 3531 static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, 3532 __be32 laddr, __be16 lport, 3533 __be32 raddr, __be16 rport, 3534 u32 rcv_isn, u32 filter, u16 window, 3535 u32 rss_qid, u8 port_id) 3536 { 3537 struct sk_buff *req_skb; 3538 struct fw_ofld_connection_wr *req; 3539 struct cpl_pass_accept_req *cpl = cplhdr(skb); 3540 int ret; 3541 3542 req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL); 3543 req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req)); 3544 memset(req, 0, sizeof(*req)); 3545 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F); 3546 req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); 3547 req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F); 3548 req->le.filter = (__force __be32) filter; 3549 req->le.lport = lport; 3550 req->le.pport = rport; 3551 req->le.u.ipv4.lip = laddr; 3552 req->le.u.ipv4.pip = raddr; 3553 req->tcb.rcv_nxt = htonl(rcv_isn + 1); 3554 req->tcb.rcv_adv = htons(window); 3555 req->tcb.t_state_to_astid = 3556 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) | 3557 FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) | 3558 FW_OFLD_CONNECTION_WR_ASTID_V( 3559 GET_PASS_OPEN_TID(ntohl(cpl->tos_stid)))); 3560 3561 /* 3562 * We store the qid in opt2 which will be used by the firmware 3563 * to send us the wr response. 3564 */ 3565 req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid)); 3566 3567 /* 3568 * We initialize the MSS index in TCB to 0xF. 3569 * So that when driver sends cpl_pass_accept_rpl 3570 * TCB picks up the correct value. If this was 0 3571 * TP will ignore any value > 0 for MSS index. 3572 */ 3573 req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF)); 3574 req->cookie = (unsigned long)skb; 3575 3576 set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id); 3577 ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb); 3578 if (ret < 0) { 3579 pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__, 3580 ret); 3581 kfree_skb(skb); 3582 kfree_skb(req_skb); 3583 } 3584 } 3585 3586 /* 3587 * Handler for CPL_RX_PKT message. Need to handle cpl_rx_pkt 3588 * messages when a filter is being used instead of server to 3589 * redirect a syn packet. When packets hit filter they are redirected 3590 * to the offload queue and driver tries to establish the connection 3591 * using firmware work request. 3592 */ 3593 static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) 3594 { 3595 int stid; 3596 unsigned int filter; 3597 struct ethhdr *eh = NULL; 3598 struct vlan_ethhdr *vlan_eh = NULL; 3599 struct iphdr *iph; 3600 struct tcphdr *tcph; 3601 struct rss_header *rss = (void *)skb->data; 3602 struct cpl_rx_pkt *cpl = (void *)skb->data; 3603 struct cpl_pass_accept_req *req = (void *)(rss + 1); 3604 struct l2t_entry *e; 3605 struct dst_entry *dst; 3606 struct c4iw_ep *lep; 3607 u16 window; 3608 struct port_info *pi; 3609 struct net_device *pdev; 3610 u16 rss_qid, eth_hdr_len; 3611 int step; 3612 u32 tx_chan; 3613 struct neighbour *neigh; 3614 3615 /* Drop all non-SYN packets */ 3616 if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN))) 3617 goto reject; 3618 3619 /* 3620 * Drop all packets which did not hit the filter. 3621 * Unlikely to happen. 3622 */ 3623 if (!(rss->filter_hit && rss->filter_tid)) 3624 goto reject; 3625 3626 /* 3627 * Calculate the server tid from filter hit index from cpl_rx_pkt. 3628 */ 3629 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val); 3630 3631 lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid); 3632 if (!lep) { 3633 PDBG("%s connect request on invalid stid %d\n", __func__, stid); 3634 goto reject; 3635 } 3636 3637 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ? 3638 G_RX_ETHHDR_LEN(htonl(cpl->l2info)) : 3639 G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info)); 3640 if (eth_hdr_len == ETH_HLEN) { 3641 eh = (struct ethhdr *)(req + 1); 3642 iph = (struct iphdr *)(eh + 1); 3643 } else { 3644 vlan_eh = (struct vlan_ethhdr *)(req + 1); 3645 iph = (struct iphdr *)(vlan_eh + 1); 3646 skb->vlan_tci = ntohs(cpl->vlan); 3647 } 3648 3649 if (iph->version != 0x4) 3650 goto reject; 3651 3652 tcph = (struct tcphdr *)(iph + 1); 3653 skb_set_network_header(skb, (void *)iph - (void *)rss); 3654 skb_set_transport_header(skb, (void *)tcph - (void *)rss); 3655 skb_get(skb); 3656 3657 PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__, 3658 ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr), 3659 ntohs(tcph->source), iph->tos); 3660 3661 dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source, 3662 iph->tos); 3663 if (!dst) { 3664 pr_err("%s - failed to find dst entry!\n", 3665 __func__); 3666 goto reject; 3667 } 3668 neigh = dst_neigh_lookup_skb(dst, skb); 3669 3670 if (!neigh) { 3671 pr_err("%s - failed to allocate neigh!\n", 3672 __func__); 3673 goto free_dst; 3674 } 3675 3676 if (neigh->dev->flags & IFF_LOOPBACK) { 3677 pdev = ip_dev_find(&init_net, iph->daddr); 3678 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, 3679 pdev, 0); 3680 pi = (struct port_info *)netdev_priv(pdev); 3681 tx_chan = cxgb4_port_chan(pdev); 3682 dev_put(pdev); 3683 } else { 3684 pdev = get_real_dev(neigh->dev); 3685 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, 3686 pdev, 0); 3687 pi = (struct port_info *)netdev_priv(pdev); 3688 tx_chan = cxgb4_port_chan(pdev); 3689 } 3690 neigh_release(neigh); 3691 if (!e) { 3692 pr_err("%s - failed to allocate l2t entry!\n", 3693 __func__); 3694 goto free_dst; 3695 } 3696 3697 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; 3698 rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step]; 3699 window = (__force u16) htons((__force u16)tcph->window); 3700 3701 /* Calcuate filter portion for LE region. */ 3702 filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple( 3703 dev->rdev.lldi.ports[0], 3704 e)); 3705 3706 /* 3707 * Synthesize the cpl_pass_accept_req. We have everything except the 3708 * TID. Once firmware sends a reply with TID we update the TID field 3709 * in cpl and pass it through the regular cpl_pass_accept_req path. 3710 */ 3711 build_cpl_pass_accept_req(skb, stid, iph->tos); 3712 send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr, 3713 tcph->source, ntohl(tcph->seq), filter, window, 3714 rss_qid, pi->port_id); 3715 cxgb4_l2t_release(e); 3716 free_dst: 3717 dst_release(dst); 3718 reject: 3719 return 0; 3720 } 3721 3722 /* 3723 * These are the real handlers that are called from a 3724 * work queue. 3725 */ 3726 static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = { 3727 [CPL_ACT_ESTABLISH] = act_establish, 3728 [CPL_ACT_OPEN_RPL] = act_open_rpl, 3729 [CPL_RX_DATA] = rx_data, 3730 [CPL_ABORT_RPL_RSS] = abort_rpl, 3731 [CPL_ABORT_RPL] = abort_rpl, 3732 [CPL_PASS_OPEN_RPL] = pass_open_rpl, 3733 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl, 3734 [CPL_PASS_ACCEPT_REQ] = pass_accept_req, 3735 [CPL_PASS_ESTABLISH] = pass_establish, 3736 [CPL_PEER_CLOSE] = peer_close, 3737 [CPL_ABORT_REQ_RSS] = peer_abort, 3738 [CPL_CLOSE_CON_RPL] = close_con_rpl, 3739 [CPL_RDMA_TERMINATE] = terminate, 3740 [CPL_FW4_ACK] = fw4_ack, 3741 [CPL_FW6_MSG] = deferred_fw6_msg, 3742 [CPL_RX_PKT] = rx_pkt 3743 }; 3744 3745 static void process_timeout(struct c4iw_ep *ep) 3746 { 3747 struct c4iw_qp_attributes attrs; 3748 int abort = 1; 3749 3750 mutex_lock(&ep->com.mutex); 3751 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, 3752 ep->com.state); 3753 set_bit(TIMEDOUT, &ep->com.history); 3754 switch (ep->com.state) { 3755 case MPA_REQ_SENT: 3756 __state_set(&ep->com, ABORTING); 3757 connect_reply_upcall(ep, -ETIMEDOUT); 3758 break; 3759 case MPA_REQ_WAIT: 3760 __state_set(&ep->com, ABORTING); 3761 break; 3762 case CLOSING: 3763 case MORIBUND: 3764 if (ep->com.cm_id && ep->com.qp) { 3765 attrs.next_state = C4IW_QP_STATE_ERROR; 3766 c4iw_modify_qp(ep->com.qp->rhp, 3767 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 3768 &attrs, 1); 3769 } 3770 __state_set(&ep->com, ABORTING); 3771 close_complete_upcall(ep, -ETIMEDOUT); 3772 break; 3773 case ABORTING: 3774 case DEAD: 3775 3776 /* 3777 * These states are expected if the ep timed out at the same 3778 * time as another thread was calling stop_ep_timer(). 3779 * So we silently do nothing for these states. 3780 */ 3781 abort = 0; 3782 break; 3783 default: 3784 WARN(1, "%s unexpected state ep %p tid %u state %u\n", 3785 __func__, ep, ep->hwtid, ep->com.state); 3786 abort = 0; 3787 } 3788 if (abort) 3789 abort_connection(ep, NULL, GFP_KERNEL); 3790 mutex_unlock(&ep->com.mutex); 3791 c4iw_put_ep(&ep->com); 3792 } 3793 3794 static void process_timedout_eps(void) 3795 { 3796 struct c4iw_ep *ep; 3797 3798 spin_lock_irq(&timeout_lock); 3799 while (!list_empty(&timeout_list)) { 3800 struct list_head *tmp; 3801 3802 tmp = timeout_list.next; 3803 list_del(tmp); 3804 tmp->next = NULL; 3805 tmp->prev = NULL; 3806 spin_unlock_irq(&timeout_lock); 3807 ep = list_entry(tmp, struct c4iw_ep, entry); 3808 process_timeout(ep); 3809 spin_lock_irq(&timeout_lock); 3810 } 3811 spin_unlock_irq(&timeout_lock); 3812 } 3813 3814 static void process_work(struct work_struct *work) 3815 { 3816 struct sk_buff *skb = NULL; 3817 struct c4iw_dev *dev; 3818 struct cpl_act_establish *rpl; 3819 unsigned int opcode; 3820 int ret; 3821 3822 process_timedout_eps(); 3823 while ((skb = skb_dequeue(&rxq))) { 3824 rpl = cplhdr(skb); 3825 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); 3826 opcode = rpl->ot.opcode; 3827 3828 BUG_ON(!work_handlers[opcode]); 3829 ret = work_handlers[opcode](dev, skb); 3830 if (!ret) 3831 kfree_skb(skb); 3832 process_timedout_eps(); 3833 } 3834 } 3835 3836 static DECLARE_WORK(skb_work, process_work); 3837 3838 static void ep_timeout(unsigned long arg) 3839 { 3840 struct c4iw_ep *ep = (struct c4iw_ep *)arg; 3841 int kickit = 0; 3842 3843 spin_lock(&timeout_lock); 3844 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { 3845 /* 3846 * Only insert if it is not already on the list. 3847 */ 3848 if (!ep->entry.next) { 3849 list_add_tail(&ep->entry, &timeout_list); 3850 kickit = 1; 3851 } 3852 } 3853 spin_unlock(&timeout_lock); 3854 if (kickit) 3855 queue_work(workq, &skb_work); 3856 } 3857 3858 /* 3859 * All the CM events are handled on a work queue to have a safe context. 3860 */ 3861 static int sched(struct c4iw_dev *dev, struct sk_buff *skb) 3862 { 3863 3864 /* 3865 * Save dev in the skb->cb area. 3866 */ 3867 *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev; 3868 3869 /* 3870 * Queue the skb and schedule the worker thread. 3871 */ 3872 skb_queue_tail(&rxq, skb); 3873 queue_work(workq, &skb_work); 3874 return 0; 3875 } 3876 3877 static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 3878 { 3879 struct cpl_set_tcb_rpl *rpl = cplhdr(skb); 3880 3881 if (rpl->status != CPL_ERR_NONE) { 3882 printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u " 3883 "for tid %u\n", rpl->status, GET_TID(rpl)); 3884 } 3885 kfree_skb(skb); 3886 return 0; 3887 } 3888 3889 static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) 3890 { 3891 struct cpl_fw6_msg *rpl = cplhdr(skb); 3892 struct c4iw_wr_wait *wr_waitp; 3893 int ret; 3894 3895 PDBG("%s type %u\n", __func__, rpl->type); 3896 3897 switch (rpl->type) { 3898 case FW6_TYPE_WR_RPL: 3899 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); 3900 wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; 3901 PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); 3902 if (wr_waitp) 3903 c4iw_wake_up(wr_waitp, ret ? -ret : 0); 3904 kfree_skb(skb); 3905 break; 3906 case FW6_TYPE_CQE: 3907 case FW6_TYPE_OFLD_CONNECTION_WR_RPL: 3908 sched(dev, skb); 3909 break; 3910 default: 3911 printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__, 3912 rpl->type); 3913 kfree_skb(skb); 3914 break; 3915 } 3916 return 0; 3917 } 3918 3919 static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) 3920 { 3921 struct cpl_abort_req_rss *req = cplhdr(skb); 3922 struct c4iw_ep *ep; 3923 struct tid_info *t = dev->rdev.lldi.tids; 3924 unsigned int tid = GET_TID(req); 3925 3926 ep = lookup_tid(t, tid); 3927 if (!ep) { 3928 printk(KERN_WARNING MOD 3929 "Abort on non-existent endpoint, tid %d\n", tid); 3930 kfree_skb(skb); 3931 return 0; 3932 } 3933 if (is_neg_adv(req->status)) { 3934 dev_warn(&dev->rdev.lldi.pdev->dev, 3935 "Negative advice on abort - tid %u status %d (%s)\n", 3936 ep->hwtid, req->status, neg_adv_str(req->status)); 3937 kfree_skb(skb); 3938 return 0; 3939 } 3940 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, 3941 ep->com.state); 3942 3943 /* 3944 * Wake up any threads in rdma_init() or rdma_fini(). 3945 * However, if we are on MPAv2 and want to retry with MPAv1 3946 * then, don't wake up yet. 3947 */ 3948 if (mpa_rev == 2 && !ep->tried_with_mpa_v1) { 3949 if (ep->com.state != MPA_REQ_SENT) 3950 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 3951 } else 3952 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 3953 sched(dev, skb); 3954 return 0; 3955 } 3956 3957 /* 3958 * Most upcalls from the T4 Core go to sched() to 3959 * schedule the processing on a work queue. 3960 */ 3961 c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = { 3962 [CPL_ACT_ESTABLISH] = sched, 3963 [CPL_ACT_OPEN_RPL] = sched, 3964 [CPL_RX_DATA] = sched, 3965 [CPL_ABORT_RPL_RSS] = sched, 3966 [CPL_ABORT_RPL] = sched, 3967 [CPL_PASS_OPEN_RPL] = sched, 3968 [CPL_CLOSE_LISTSRV_RPL] = sched, 3969 [CPL_PASS_ACCEPT_REQ] = sched, 3970 [CPL_PASS_ESTABLISH] = sched, 3971 [CPL_PEER_CLOSE] = sched, 3972 [CPL_CLOSE_CON_RPL] = sched, 3973 [CPL_ABORT_REQ_RSS] = peer_abort_intr, 3974 [CPL_RDMA_TERMINATE] = sched, 3975 [CPL_FW4_ACK] = sched, 3976 [CPL_SET_TCB_RPL] = set_tcb_rpl, 3977 [CPL_FW6_MSG] = fw6_msg, 3978 [CPL_RX_PKT] = sched 3979 }; 3980 3981 int __init c4iw_cm_init(void) 3982 { 3983 spin_lock_init(&timeout_lock); 3984 skb_queue_head_init(&rxq); 3985 3986 workq = create_singlethread_workqueue("iw_cxgb4"); 3987 if (!workq) 3988 return -ENOMEM; 3989 3990 return 0; 3991 } 3992 3993 void c4iw_cm_term(void) 3994 { 3995 WARN_ON(!list_empty(&timeout_list)); 3996 flush_workqueue(workq); 3997 destroy_workqueue(workq); 3998 } 3999