/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>

#include "iw_cxgb4.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
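/*
 * When peer2peer is enabled, p2p_type selects which RDMAP operation is
 * advertised for the MPA v2 RTR message (see send_mpa_req() and
 * send_mpa_reply()): an RDMA READ request by default, or an RDMA WRITE.
 */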
static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				   "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is RFC 5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		 " compliant (default=1)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p stopping\n", __func__, ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags))
		c4iw_put_ep(&ep->com);
}

static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
		  struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ?
error : 0; 211 } 212 213 static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb) 214 { 215 struct cpl_tid_release *req; 216 217 skb = get_skb(skb, sizeof *req, GFP_KERNEL); 218 if (!skb) 219 return; 220 req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req)); 221 INIT_TP_WR(req, hwtid); 222 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); 223 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); 224 c4iw_ofld_send(rdev, skb); 225 return; 226 } 227 228 static void set_emss(struct c4iw_ep *ep, u16 opt) 229 { 230 ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40; 231 ep->mss = ep->emss; 232 if (GET_TCPOPT_TSTAMP(opt)) 233 ep->emss -= 12; 234 if (ep->emss < 128) 235 ep->emss = 128; 236 PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt), 237 ep->mss, ep->emss); 238 } 239 240 static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc) 241 { 242 enum c4iw_ep_state state; 243 244 mutex_lock(&epc->mutex); 245 state = epc->state; 246 mutex_unlock(&epc->mutex); 247 return state; 248 } 249 250 static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) 251 { 252 epc->state = new; 253 } 254 255 static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) 256 { 257 mutex_lock(&epc->mutex); 258 PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]); 259 __state_set(epc, new); 260 mutex_unlock(&epc->mutex); 261 return; 262 } 263 264 static void *alloc_ep(int size, gfp_t gfp) 265 { 266 struct c4iw_ep_common *epc; 267 268 epc = kzalloc(size, gfp); 269 if (epc) { 270 kref_init(&epc->kref); 271 mutex_init(&epc->mutex); 272 c4iw_init_wr_wait(&epc->wr_wait); 273 } 274 PDBG("%s alloc ep %p\n", __func__, epc); 275 return epc; 276 } 277 278 void _c4iw_free_ep(struct kref *kref) 279 { 280 struct c4iw_ep *ep; 281 282 ep = container_of(kref, struct c4iw_ep, com.kref); 283 PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]); 284 if (test_bit(QP_REFERENCED, &ep->com.flags)) 285 deref_qp(ep); 286 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { 287 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid); 288 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); 289 dst_release(ep->dst); 290 cxgb4_l2t_release(ep->l2t); 291 } 292 kfree(ep); 293 } 294 295 static void release_ep_resources(struct c4iw_ep *ep) 296 { 297 set_bit(RELEASE_RESOURCES, &ep->com.flags); 298 c4iw_put_ep(&ep->com); 299 } 300 301 static int status2errno(int status) 302 { 303 switch (status) { 304 case CPL_ERR_NONE: 305 return 0; 306 case CPL_ERR_CONN_RESET: 307 return -ECONNRESET; 308 case CPL_ERR_ARP_MISS: 309 return -EHOSTUNREACH; 310 case CPL_ERR_CONN_TIMEDOUT: 311 return -ETIMEDOUT; 312 case CPL_ERR_TCAM_FULL: 313 return -ENOMEM; 314 case CPL_ERR_CONN_EXIST: 315 return -EADDRINUSE; 316 default: 317 return -EIO; 318 } 319 } 320 321 /* 322 * Try and reuse skbs already allocated... 
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}

static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	return rt;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_rdev *rdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(rdev, skb);
}

/*
 * Program the connection's firmware flow context: PCI function, tx channel
 * and port, ingress queue, initial send/receive sequence numbers, send
 * buffer size and MSS.
 */
static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS(8));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	/* Pad WR to 16 byte boundary */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
434 } 435 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 436 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 437 req = (struct cpl_close_con_req *) skb_put(skb, wrlen); 438 memset(req, 0, wrlen); 439 INIT_TP_WR(req, ep->hwtid); 440 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, 441 ep->hwtid)); 442 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 443 } 444 445 static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) 446 { 447 struct cpl_abort_req *req; 448 int wrlen = roundup(sizeof *req, 16); 449 450 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 451 skb = get_skb(skb, wrlen, gfp); 452 if (!skb) { 453 printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 454 __func__); 455 return -ENOMEM; 456 } 457 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 458 t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure); 459 req = (struct cpl_abort_req *) skb_put(skb, wrlen); 460 memset(req, 0, wrlen); 461 INIT_TP_WR(req, ep->hwtid); 462 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); 463 req->cmd = CPL_ABORT_SEND_RST; 464 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 465 } 466 467 #define VLAN_NONE 0xfff 468 #define FILTER_SEL_VLAN_NONE 0xffff 469 #define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */ 470 #define FILTER_SEL_WIDTH_VIN_P_FC \ 471 (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/ 472 #define FILTER_SEL_WIDTH_TAG_P_FC \ 473 (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */ 474 #define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC) 475 476 static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst, 477 struct l2t_entry *l2t) 478 { 479 unsigned int ntuple = 0; 480 u32 viid; 481 482 switch (dev->rdev.lldi.filt_mode) { 483 484 /* default filter mode */ 485 case HW_TPL_FR_MT_PR_IV_P_FC: 486 if (l2t->vlan == VLAN_NONE) 487 ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC; 488 else { 489 ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC; 490 ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC; 491 } 492 ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << 493 FILTER_SEL_WIDTH_VLD_TAG_P_FC; 494 break; 495 case HW_TPL_FR_MT_PR_OV_P_FC: { 496 viid = cxgb4_port_viid(l2t->neigh->dev); 497 498 ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC; 499 ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC; 500 ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC; 501 ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << 502 FILTER_SEL_WIDTH_VLD_TAG_P_FC; 503 break; 504 } 505 default: 506 break; 507 } 508 return ntuple; 509 } 510 511 static int send_connect(struct c4iw_ep *ep) 512 { 513 struct cpl_act_open_req *req; 514 struct cpl_t5_act_open_req *t5_req; 515 struct sk_buff *skb; 516 u64 opt0; 517 u32 opt2; 518 unsigned int mtu_idx; 519 int wscale; 520 int size = is_t4(ep->com.dev->rdev.lldi.adapter_type) ? 521 sizeof(struct cpl_act_open_req) : 522 sizeof(struct cpl_t5_act_open_req); 523 int wrlen = roundup(size, 16); 524 525 PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid); 526 527 skb = get_skb(NULL, wrlen, GFP_KERNEL); 528 if (!skb) { 529 printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 530 __func__); 531 return -ENOMEM; 532 } 533 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 534 535 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 536 wscale = compute_wscale(rcv_win); 537 opt0 = (nocong ? 
NO_CONG(1) : 0) | 538 KEEP_ALIVE(1) | 539 DELACK(1) | 540 WND_SCALE(wscale) | 541 MSS_IDX(mtu_idx) | 542 L2T_IDX(ep->l2t->idx) | 543 TX_CHAN(ep->tx_chan) | 544 SMAC_SEL(ep->smac_idx) | 545 DSCP(ep->tos) | 546 ULP_MODE(ULP_MODE_TCPDDP) | 547 RCV_BUFSIZ(rcv_win>>10); 548 opt2 = RX_CHANNEL(0) | 549 CCTRL_ECN(enable_ecn) | 550 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 551 if (enable_tcp_timestamps) 552 opt2 |= TSTAMPS_EN(1); 553 if (enable_tcp_sack) 554 opt2 |= SACK_EN(1); 555 if (wscale && enable_tcp_window_scaling) 556 opt2 |= WND_SCALE_EN(1); 557 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); 558 559 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { 560 req = (struct cpl_act_open_req *) skb_put(skb, wrlen); 561 INIT_TP_WR(req, 0); 562 OPCODE_TID(req) = cpu_to_be32( 563 MK_OPCODE_TID(CPL_ACT_OPEN_REQ, 564 ((ep->rss_qid << 14) | ep->atid))); 565 req->local_port = ep->com.local_addr.sin_port; 566 req->peer_port = ep->com.remote_addr.sin_port; 567 req->local_ip = ep->com.local_addr.sin_addr.s_addr; 568 req->peer_ip = ep->com.remote_addr.sin_addr.s_addr; 569 req->opt0 = cpu_to_be64(opt0); 570 req->params = cpu_to_be32(select_ntuple(ep->com.dev, 571 ep->dst, ep->l2t)); 572 req->opt2 = cpu_to_be32(opt2); 573 } else { 574 t5_req = (struct cpl_t5_act_open_req *) skb_put(skb, wrlen); 575 INIT_TP_WR(t5_req, 0); 576 OPCODE_TID(t5_req) = cpu_to_be32( 577 MK_OPCODE_TID(CPL_ACT_OPEN_REQ, 578 ((ep->rss_qid << 14) | ep->atid))); 579 t5_req->local_port = ep->com.local_addr.sin_port; 580 t5_req->peer_port = ep->com.remote_addr.sin_port; 581 t5_req->local_ip = ep->com.local_addr.sin_addr.s_addr; 582 t5_req->peer_ip = ep->com.remote_addr.sin_addr.s_addr; 583 t5_req->opt0 = cpu_to_be64(opt0); 584 t5_req->params = cpu_to_be64(V_FILTER_TUPLE( 585 select_ntuple(ep->com.dev, ep->dst, ep->l2t))); 586 t5_req->opt2 = cpu_to_be32(opt2); 587 } 588 589 set_bit(ACT_OPEN_REQ, &ep->com.history); 590 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 591 } 592 593 static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, 594 u8 mpa_rev_to_use) 595 { 596 int mpalen, wrlen; 597 struct fw_ofld_tx_data_wr *req; 598 struct mpa_message *mpa; 599 struct mpa_v2_conn_params mpa_v2_params; 600 601 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 602 603 BUG_ON(skb_cloned(skb)); 604 605 mpalen = sizeof(*mpa) + ep->plen; 606 if (mpa_rev_to_use == 2) 607 mpalen += sizeof(struct mpa_v2_conn_params); 608 wrlen = roundup(mpalen + sizeof *req, 16); 609 skb = get_skb(skb, wrlen, GFP_KERNEL); 610 if (!skb) { 611 connect_reply_upcall(ep, -ENOMEM); 612 return; 613 } 614 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 615 616 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); 617 memset(req, 0, wrlen); 618 req->op_to_immdlen = cpu_to_be32( 619 FW_WR_OP(FW_OFLD_TX_DATA_WR) | 620 FW_WR_COMPL(1) | 621 FW_WR_IMMDLEN(mpalen)); 622 req->flowid_len16 = cpu_to_be32( 623 FW_WR_FLOWID(ep->hwtid) | 624 FW_WR_LEN16(wrlen >> 4)); 625 req->plen = cpu_to_be32(mpalen); 626 req->tunnel_to_proxy = cpu_to_be32( 627 FW_OFLD_TX_DATA_WR_FLUSH(1) | 628 FW_OFLD_TX_DATA_WR_SHOVE(1)); 629 630 mpa = (struct mpa_message *)(req + 1); 631 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); 632 mpa->flags = (crc_enabled ? MPA_CRC : 0) | 633 (markers_enabled ? MPA_MARKERS : 0) | 634 (mpa_rev_to_use == 2 ? 
MPA_ENHANCED_RDMA_CONN : 0); 635 mpa->private_data_size = htons(ep->plen); 636 mpa->revision = mpa_rev_to_use; 637 if (mpa_rev_to_use == 1) { 638 ep->tried_with_mpa_v1 = 1; 639 ep->retry_with_mpa_v1 = 0; 640 } 641 642 if (mpa_rev_to_use == 2) { 643 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 644 sizeof (struct mpa_v2_conn_params)); 645 mpa_v2_params.ird = htons((u16)ep->ird); 646 mpa_v2_params.ord = htons((u16)ep->ord); 647 648 if (peer2peer) { 649 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 650 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) 651 mpa_v2_params.ord |= 652 htons(MPA_V2_RDMA_WRITE_RTR); 653 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) 654 mpa_v2_params.ord |= 655 htons(MPA_V2_RDMA_READ_RTR); 656 } 657 memcpy(mpa->private_data, &mpa_v2_params, 658 sizeof(struct mpa_v2_conn_params)); 659 660 if (ep->plen) 661 memcpy(mpa->private_data + 662 sizeof(struct mpa_v2_conn_params), 663 ep->mpa_pkt + sizeof(*mpa), ep->plen); 664 } else 665 if (ep->plen) 666 memcpy(mpa->private_data, 667 ep->mpa_pkt + sizeof(*mpa), ep->plen); 668 669 /* 670 * Reference the mpa skb. This ensures the data area 671 * will remain in memory until the hw acks the tx. 672 * Function fw4_ack() will deref it. 673 */ 674 skb_get(skb); 675 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 676 BUG_ON(ep->mpa_skb); 677 ep->mpa_skb = skb; 678 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 679 start_ep_timer(ep); 680 state_set(&ep->com, MPA_REQ_SENT); 681 ep->mpa_attr.initiator = 1; 682 return; 683 } 684 685 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) 686 { 687 int mpalen, wrlen; 688 struct fw_ofld_tx_data_wr *req; 689 struct mpa_message *mpa; 690 struct sk_buff *skb; 691 struct mpa_v2_conn_params mpa_v2_params; 692 693 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 694 695 mpalen = sizeof(*mpa) + plen; 696 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) 697 mpalen += sizeof(struct mpa_v2_conn_params); 698 wrlen = roundup(mpalen + sizeof *req, 16); 699 700 skb = get_skb(NULL, wrlen, GFP_KERNEL); 701 if (!skb) { 702 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); 703 return -ENOMEM; 704 } 705 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 706 707 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); 708 memset(req, 0, wrlen); 709 req->op_to_immdlen = cpu_to_be32( 710 FW_WR_OP(FW_OFLD_TX_DATA_WR) | 711 FW_WR_COMPL(1) | 712 FW_WR_IMMDLEN(mpalen)); 713 req->flowid_len16 = cpu_to_be32( 714 FW_WR_FLOWID(ep->hwtid) | 715 FW_WR_LEN16(wrlen >> 4)); 716 req->plen = cpu_to_be32(mpalen); 717 req->tunnel_to_proxy = cpu_to_be32( 718 FW_OFLD_TX_DATA_WR_FLUSH(1) | 719 FW_OFLD_TX_DATA_WR_SHOVE(1)); 720 721 mpa = (struct mpa_message *)(req + 1); 722 memset(mpa, 0, sizeof(*mpa)); 723 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 724 mpa->flags = MPA_REJECT; 725 mpa->revision = ep->mpa_attr.version; 726 mpa->private_data_size = htons(plen); 727 728 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 729 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 730 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 731 sizeof (struct mpa_v2_conn_params)); 732 mpa_v2_params.ird = htons(((u16)ep->ird) | 733 (peer2peer ? MPA_V2_PEER2PEER_MODEL : 734 0)); 735 mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ? 736 (p2p_type == 737 FW_RI_INIT_P2PTYPE_RDMA_WRITE ? 738 MPA_V2_RDMA_WRITE_RTR : p2p_type == 739 FW_RI_INIT_P2PTYPE_READ_REQ ? 
740 MPA_V2_RDMA_READ_RTR : 0) : 0)); 741 memcpy(mpa->private_data, &mpa_v2_params, 742 sizeof(struct mpa_v2_conn_params)); 743 744 if (ep->plen) 745 memcpy(mpa->private_data + 746 sizeof(struct mpa_v2_conn_params), pdata, plen); 747 } else 748 if (plen) 749 memcpy(mpa->private_data, pdata, plen); 750 751 /* 752 * Reference the mpa skb again. This ensures the data area 753 * will remain in memory until the hw acks the tx. 754 * Function fw4_ack() will deref it. 755 */ 756 skb_get(skb); 757 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 758 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 759 BUG_ON(ep->mpa_skb); 760 ep->mpa_skb = skb; 761 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 762 } 763 764 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) 765 { 766 int mpalen, wrlen; 767 struct fw_ofld_tx_data_wr *req; 768 struct mpa_message *mpa; 769 struct sk_buff *skb; 770 struct mpa_v2_conn_params mpa_v2_params; 771 772 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 773 774 mpalen = sizeof(*mpa) + plen; 775 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) 776 mpalen += sizeof(struct mpa_v2_conn_params); 777 wrlen = roundup(mpalen + sizeof *req, 16); 778 779 skb = get_skb(NULL, wrlen, GFP_KERNEL); 780 if (!skb) { 781 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); 782 return -ENOMEM; 783 } 784 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 785 786 req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen); 787 memset(req, 0, wrlen); 788 req->op_to_immdlen = cpu_to_be32( 789 FW_WR_OP(FW_OFLD_TX_DATA_WR) | 790 FW_WR_COMPL(1) | 791 FW_WR_IMMDLEN(mpalen)); 792 req->flowid_len16 = cpu_to_be32( 793 FW_WR_FLOWID(ep->hwtid) | 794 FW_WR_LEN16(wrlen >> 4)); 795 req->plen = cpu_to_be32(mpalen); 796 req->tunnel_to_proxy = cpu_to_be32( 797 FW_OFLD_TX_DATA_WR_FLUSH(1) | 798 FW_OFLD_TX_DATA_WR_SHOVE(1)); 799 800 mpa = (struct mpa_message *)(req + 1); 801 memset(mpa, 0, sizeof(*mpa)); 802 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 803 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | 804 (markers_enabled ? MPA_MARKERS : 0); 805 mpa->revision = ep->mpa_attr.version; 806 mpa->private_data_size = htons(plen); 807 808 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 809 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 810 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 811 sizeof (struct mpa_v2_conn_params)); 812 mpa_v2_params.ird = htons((u16)ep->ird); 813 mpa_v2_params.ord = htons((u16)ep->ord); 814 if (peer2peer && (ep->mpa_attr.p2p_type != 815 FW_RI_INIT_P2PTYPE_DISABLED)) { 816 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 817 818 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) 819 mpa_v2_params.ord |= 820 htons(MPA_V2_RDMA_WRITE_RTR); 821 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) 822 mpa_v2_params.ord |= 823 htons(MPA_V2_RDMA_READ_RTR); 824 } 825 826 memcpy(mpa->private_data, &mpa_v2_params, 827 sizeof(struct mpa_v2_conn_params)); 828 829 if (ep->plen) 830 memcpy(mpa->private_data + 831 sizeof(struct mpa_v2_conn_params), pdata, plen); 832 } else 833 if (plen) 834 memcpy(mpa->private_data, pdata, plen); 835 836 /* 837 * Reference the mpa skb. This ensures the data area 838 * will remain in memory until the hw acks the tx. 839 * Function fw4_ack() will deref it. 
840 */ 841 skb_get(skb); 842 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 843 ep->mpa_skb = skb; 844 state_set(&ep->com, MPA_REP_SENT); 845 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 846 } 847 848 static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) 849 { 850 struct c4iw_ep *ep; 851 struct cpl_act_establish *req = cplhdr(skb); 852 unsigned int tid = GET_TID(req); 853 unsigned int atid = GET_TID_TID(ntohl(req->tos_atid)); 854 struct tid_info *t = dev->rdev.lldi.tids; 855 856 ep = lookup_atid(t, atid); 857 858 PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid, 859 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); 860 861 dst_confirm(ep->dst); 862 863 /* setup the hwtid for this connection */ 864 ep->hwtid = tid; 865 cxgb4_insert_tid(t, ep, tid); 866 insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid); 867 868 ep->snd_seq = be32_to_cpu(req->snd_isn); 869 ep->rcv_seq = be32_to_cpu(req->rcv_isn); 870 871 set_emss(ep, ntohs(req->tcp_opt)); 872 873 /* dealloc the atid */ 874 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); 875 cxgb4_free_atid(t, atid); 876 set_bit(ACT_ESTAB, &ep->com.history); 877 878 /* start MPA negotiation */ 879 send_flowc(ep, NULL); 880 if (ep->retry_with_mpa_v1) 881 send_mpa_req(ep, skb, 1); 882 else 883 send_mpa_req(ep, skb, mpa_rev); 884 885 return 0; 886 } 887 888 static void close_complete_upcall(struct c4iw_ep *ep) 889 { 890 struct iw_cm_event event; 891 892 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 893 memset(&event, 0, sizeof(event)); 894 event.event = IW_CM_EVENT_CLOSE; 895 if (ep->com.cm_id) { 896 PDBG("close complete delivered ep %p cm_id %p tid %u\n", 897 ep, ep->com.cm_id, ep->hwtid); 898 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 899 ep->com.cm_id->rem_ref(ep->com.cm_id); 900 ep->com.cm_id = NULL; 901 set_bit(CLOSE_UPCALL, &ep->com.history); 902 } 903 } 904 905 static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) 906 { 907 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 908 close_complete_upcall(ep); 909 state_set(&ep->com, ABORTING); 910 set_bit(ABORT_CONN, &ep->com.history); 911 return send_abort(ep, skb, gfp); 912 } 913 914 static void peer_close_upcall(struct c4iw_ep *ep) 915 { 916 struct iw_cm_event event; 917 918 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 919 memset(&event, 0, sizeof(event)); 920 event.event = IW_CM_EVENT_DISCONNECT; 921 if (ep->com.cm_id) { 922 PDBG("peer close delivered ep %p cm_id %p tid %u\n", 923 ep, ep->com.cm_id, ep->hwtid); 924 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 925 set_bit(DISCONN_UPCALL, &ep->com.history); 926 } 927 } 928 929 static void peer_abort_upcall(struct c4iw_ep *ep) 930 { 931 struct iw_cm_event event; 932 933 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 934 memset(&event, 0, sizeof(event)); 935 event.event = IW_CM_EVENT_CLOSE; 936 event.status = -ECONNRESET; 937 if (ep->com.cm_id) { 938 PDBG("abort delivered ep %p cm_id %p tid %u\n", ep, 939 ep->com.cm_id, ep->hwtid); 940 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 941 ep->com.cm_id->rem_ref(ep->com.cm_id); 942 ep->com.cm_id = NULL; 943 set_bit(ABORT_UPCALL, &ep->com.history); 944 } 945 } 946 947 static void connect_reply_upcall(struct c4iw_ep *ep, int status) 948 { 949 struct iw_cm_event event; 950 951 PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status); 952 memset(&event, 0, sizeof(event)); 953 event.event = IW_CM_EVENT_CONNECT_REPLY; 954 event.status = status; 955 event.local_addr = 
ep->com.local_addr; 956 event.remote_addr = ep->com.remote_addr; 957 958 if ((status == 0) || (status == -ECONNREFUSED)) { 959 if (!ep->tried_with_mpa_v1) { 960 /* this means MPA_v2 is used */ 961 event.private_data_len = ep->plen - 962 sizeof(struct mpa_v2_conn_params); 963 event.private_data = ep->mpa_pkt + 964 sizeof(struct mpa_message) + 965 sizeof(struct mpa_v2_conn_params); 966 } else { 967 /* this means MPA_v1 is used */ 968 event.private_data_len = ep->plen; 969 event.private_data = ep->mpa_pkt + 970 sizeof(struct mpa_message); 971 } 972 } 973 974 PDBG("%s ep %p tid %u status %d\n", __func__, ep, 975 ep->hwtid, status); 976 set_bit(CONN_RPL_UPCALL, &ep->com.history); 977 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 978 979 if (status < 0) { 980 ep->com.cm_id->rem_ref(ep->com.cm_id); 981 ep->com.cm_id = NULL; 982 } 983 } 984 985 static void connect_request_upcall(struct c4iw_ep *ep) 986 { 987 struct iw_cm_event event; 988 989 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 990 memset(&event, 0, sizeof(event)); 991 event.event = IW_CM_EVENT_CONNECT_REQUEST; 992 event.local_addr = ep->com.local_addr; 993 event.remote_addr = ep->com.remote_addr; 994 event.provider_data = ep; 995 if (!ep->tried_with_mpa_v1) { 996 /* this means MPA_v2 is used */ 997 event.ord = ep->ord; 998 event.ird = ep->ird; 999 event.private_data_len = ep->plen - 1000 sizeof(struct mpa_v2_conn_params); 1001 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + 1002 sizeof(struct mpa_v2_conn_params); 1003 } else { 1004 /* this means MPA_v1 is used. Send max supported */ 1005 event.ord = c4iw_max_read_depth; 1006 event.ird = c4iw_max_read_depth; 1007 event.private_data_len = ep->plen; 1008 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); 1009 } 1010 if (state_read(&ep->parent_ep->com) != DEAD) { 1011 c4iw_get_ep(&ep->com); 1012 ep->parent_ep->com.cm_id->event_handler( 1013 ep->parent_ep->com.cm_id, 1014 &event); 1015 } 1016 set_bit(CONNREQ_UPCALL, &ep->com.history); 1017 c4iw_put_ep(&ep->parent_ep->com); 1018 ep->parent_ep = NULL; 1019 } 1020 1021 static void established_upcall(struct c4iw_ep *ep) 1022 { 1023 struct iw_cm_event event; 1024 1025 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1026 memset(&event, 0, sizeof(event)); 1027 event.event = IW_CM_EVENT_ESTABLISHED; 1028 event.ird = ep->ird; 1029 event.ord = ep->ord; 1030 if (ep->com.cm_id) { 1031 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1032 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1033 set_bit(ESTAB_UPCALL, &ep->com.history); 1034 } 1035 } 1036 1037 static int update_rx_credits(struct c4iw_ep *ep, u32 credits) 1038 { 1039 struct cpl_rx_data_ack *req; 1040 struct sk_buff *skb; 1041 int wrlen = roundup(sizeof *req, 16); 1042 1043 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); 1044 skb = get_skb(NULL, wrlen, GFP_KERNEL); 1045 if (!skb) { 1046 printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n"); 1047 return 0; 1048 } 1049 1050 req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen); 1051 memset(req, 0, wrlen); 1052 INIT_TP_WR(req, ep->hwtid); 1053 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, 1054 ep->hwtid)); 1055 req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) | 1056 F_RX_DACK_CHANGE | 1057 V_RX_DACK_MODE(dack_mode)); 1058 set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx); 1059 c4iw_ofld_send(&ep->com.dev->rdev, skb); 1060 return credits; 1061 } 1062 1063 static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) 1064 { 1065 struct 
mpa_message *mpa; 1066 struct mpa_v2_conn_params *mpa_v2_params; 1067 u16 plen; 1068 u16 resp_ird, resp_ord; 1069 u8 rtr_mismatch = 0, insuff_ird = 0; 1070 struct c4iw_qp_attributes attrs; 1071 enum c4iw_qp_attr_mask mask; 1072 int err; 1073 1074 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1075 1076 /* 1077 * Stop mpa timer. If it expired, then the state has 1078 * changed and we bail since ep_timeout already aborted 1079 * the connection. 1080 */ 1081 stop_ep_timer(ep); 1082 if (state_read(&ep->com) != MPA_REQ_SENT) 1083 return; 1084 1085 /* 1086 * If we get more than the supported amount of private data 1087 * then we must fail this connection. 1088 */ 1089 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { 1090 err = -EINVAL; 1091 goto err; 1092 } 1093 1094 /* 1095 * copy the new data into our accumulation buffer. 1096 */ 1097 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 1098 skb->len); 1099 ep->mpa_pkt_len += skb->len; 1100 1101 /* 1102 * if we don't even have the mpa message, then bail. 1103 */ 1104 if (ep->mpa_pkt_len < sizeof(*mpa)) 1105 return; 1106 mpa = (struct mpa_message *) ep->mpa_pkt; 1107 1108 /* Validate MPA header. */ 1109 if (mpa->revision > mpa_rev) { 1110 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," 1111 " Received = %d\n", __func__, mpa_rev, mpa->revision); 1112 err = -EPROTO; 1113 goto err; 1114 } 1115 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { 1116 err = -EPROTO; 1117 goto err; 1118 } 1119 1120 plen = ntohs(mpa->private_data_size); 1121 1122 /* 1123 * Fail if there's too much private data. 1124 */ 1125 if (plen > MPA_MAX_PRIVATE_DATA) { 1126 err = -EPROTO; 1127 goto err; 1128 } 1129 1130 /* 1131 * If plen does not account for pkt size 1132 */ 1133 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 1134 err = -EPROTO; 1135 goto err; 1136 } 1137 1138 ep->plen = (u8) plen; 1139 1140 /* 1141 * If we don't have all the pdata yet, then bail. 1142 * We'll continue process when more data arrives. 1143 */ 1144 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 1145 return; 1146 1147 if (mpa->flags & MPA_REJECT) { 1148 err = -ECONNREFUSED; 1149 goto err; 1150 } 1151 1152 /* 1153 * If we get here we have accumulated the entire mpa 1154 * start reply message including private data. And 1155 * the MPA header is valid. 1156 */ 1157 state_set(&ep->com, FPDU_MODE); 1158 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1159 ep->mpa_attr.recv_marker_enabled = markers_enabled; 1160 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1161 ep->mpa_attr.version = mpa->revision; 1162 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1163 1164 if (mpa->revision == 2) { 1165 ep->mpa_attr.enhanced_rdma_conn = 1166 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; 1167 if (ep->mpa_attr.enhanced_rdma_conn) { 1168 mpa_v2_params = (struct mpa_v2_conn_params *) 1169 (ep->mpa_pkt + sizeof(*mpa)); 1170 resp_ird = ntohs(mpa_v2_params->ird) & 1171 MPA_V2_IRD_ORD_MASK; 1172 resp_ord = ntohs(mpa_v2_params->ord) & 1173 MPA_V2_IRD_ORD_MASK; 1174 1175 /* 1176 * This is a double-check. 
Ideally, below checks are 1177 * not required since ird/ord stuff has been taken 1178 * care of in c4iw_accept_cr 1179 */ 1180 if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) { 1181 err = -ENOMEM; 1182 ep->ird = resp_ord; 1183 ep->ord = resp_ird; 1184 insuff_ird = 1; 1185 } 1186 1187 if (ntohs(mpa_v2_params->ird) & 1188 MPA_V2_PEER2PEER_MODEL) { 1189 if (ntohs(mpa_v2_params->ord) & 1190 MPA_V2_RDMA_WRITE_RTR) 1191 ep->mpa_attr.p2p_type = 1192 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 1193 else if (ntohs(mpa_v2_params->ord) & 1194 MPA_V2_RDMA_READ_RTR) 1195 ep->mpa_attr.p2p_type = 1196 FW_RI_INIT_P2PTYPE_READ_REQ; 1197 } 1198 } 1199 } else if (mpa->revision == 1) 1200 if (peer2peer) 1201 ep->mpa_attr.p2p_type = p2p_type; 1202 1203 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " 1204 "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = " 1205 "%d\n", __func__, ep->mpa_attr.crc_enabled, 1206 ep->mpa_attr.recv_marker_enabled, 1207 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 1208 ep->mpa_attr.p2p_type, p2p_type); 1209 1210 /* 1211 * If responder's RTR does not match with that of initiator, assign 1212 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not 1213 * generated when moving QP to RTS state. 1214 * A TERM message will be sent after QP has moved to RTS state 1215 */ 1216 if ((ep->mpa_attr.version == 2) && peer2peer && 1217 (ep->mpa_attr.p2p_type != p2p_type)) { 1218 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1219 rtr_mismatch = 1; 1220 } 1221 1222 attrs.mpa_attr = ep->mpa_attr; 1223 attrs.max_ird = ep->ird; 1224 attrs.max_ord = ep->ord; 1225 attrs.llp_stream_handle = ep; 1226 attrs.next_state = C4IW_QP_STATE_RTS; 1227 1228 mask = C4IW_QP_ATTR_NEXT_STATE | 1229 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | 1230 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; 1231 1232 /* bind QP and TID with INIT_WR */ 1233 err = c4iw_modify_qp(ep->com.qp->rhp, 1234 ep->com.qp, mask, &attrs, 1); 1235 if (err) 1236 goto err; 1237 1238 /* 1239 * If responder's RTR requirement did not match with what initiator 1240 * supports, generate TERM message 1241 */ 1242 if (rtr_mismatch) { 1243 printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__); 1244 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1245 attrs.ecode = MPA_NOMATCH_RTR; 1246 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1247 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1248 C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); 1249 err = -ENOMEM; 1250 goto out; 1251 } 1252 1253 /* 1254 * Generate TERM if initiator IRD is not sufficient for responder 1255 * provided ORD. Currently, we do the same behaviour even when 1256 * responder provided IRD is also not sufficient as regards to 1257 * initiator ORD. 
1258 */ 1259 if (insuff_ird) { 1260 printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n", 1261 __func__); 1262 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1263 attrs.ecode = MPA_INSUFF_IRD; 1264 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1265 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1266 C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); 1267 err = -ENOMEM; 1268 goto out; 1269 } 1270 goto out; 1271 err: 1272 state_set(&ep->com, ABORTING); 1273 send_abort(ep, skb, GFP_KERNEL); 1274 out: 1275 connect_reply_upcall(ep, err); 1276 return; 1277 } 1278 1279 static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) 1280 { 1281 struct mpa_message *mpa; 1282 struct mpa_v2_conn_params *mpa_v2_params; 1283 u16 plen; 1284 1285 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1286 1287 if (state_read(&ep->com) != MPA_REQ_WAIT) 1288 return; 1289 1290 /* 1291 * If we get more than the supported amount of private data 1292 * then we must fail this connection. 1293 */ 1294 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { 1295 stop_ep_timer(ep); 1296 abort_connection(ep, skb, GFP_KERNEL); 1297 return; 1298 } 1299 1300 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); 1301 1302 /* 1303 * Copy the new data into our accumulation buffer. 1304 */ 1305 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 1306 skb->len); 1307 ep->mpa_pkt_len += skb->len; 1308 1309 /* 1310 * If we don't even have the mpa message, then bail. 1311 * We'll continue process when more data arrives. 1312 */ 1313 if (ep->mpa_pkt_len < sizeof(*mpa)) 1314 return; 1315 1316 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); 1317 stop_ep_timer(ep); 1318 mpa = (struct mpa_message *) ep->mpa_pkt; 1319 1320 /* 1321 * Validate MPA Header. 1322 */ 1323 if (mpa->revision > mpa_rev) { 1324 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," 1325 " Received = %d\n", __func__, mpa_rev, mpa->revision); 1326 stop_ep_timer(ep); 1327 abort_connection(ep, skb, GFP_KERNEL); 1328 return; 1329 } 1330 1331 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) { 1332 stop_ep_timer(ep); 1333 abort_connection(ep, skb, GFP_KERNEL); 1334 return; 1335 } 1336 1337 plen = ntohs(mpa->private_data_size); 1338 1339 /* 1340 * Fail if there's too much private data. 1341 */ 1342 if (plen > MPA_MAX_PRIVATE_DATA) { 1343 stop_ep_timer(ep); 1344 abort_connection(ep, skb, GFP_KERNEL); 1345 return; 1346 } 1347 1348 /* 1349 * If plen does not account for pkt size 1350 */ 1351 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 1352 stop_ep_timer(ep); 1353 abort_connection(ep, skb, GFP_KERNEL); 1354 return; 1355 } 1356 ep->plen = (u8) plen; 1357 1358 /* 1359 * If we don't have all the pdata yet, then bail. 1360 */ 1361 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 1362 return; 1363 1364 /* 1365 * If we get here we have accumulated the entire mpa 1366 * start reply message including private data. 1367 */ 1368 ep->mpa_attr.initiator = 0; 1369 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1370 ep->mpa_attr.recv_marker_enabled = markers_enabled; 1371 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1372 ep->mpa_attr.version = mpa->revision; 1373 if (mpa->revision == 1) 1374 ep->tried_with_mpa_v1 = 1; 1375 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1376 1377 if (mpa->revision == 2) { 1378 ep->mpa_attr.enhanced_rdma_conn = 1379 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 
1 : 0; 1380 if (ep->mpa_attr.enhanced_rdma_conn) { 1381 mpa_v2_params = (struct mpa_v2_conn_params *) 1382 (ep->mpa_pkt + sizeof(*mpa)); 1383 ep->ird = ntohs(mpa_v2_params->ird) & 1384 MPA_V2_IRD_ORD_MASK; 1385 ep->ord = ntohs(mpa_v2_params->ord) & 1386 MPA_V2_IRD_ORD_MASK; 1387 if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) 1388 if (peer2peer) { 1389 if (ntohs(mpa_v2_params->ord) & 1390 MPA_V2_RDMA_WRITE_RTR) 1391 ep->mpa_attr.p2p_type = 1392 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 1393 else if (ntohs(mpa_v2_params->ord) & 1394 MPA_V2_RDMA_READ_RTR) 1395 ep->mpa_attr.p2p_type = 1396 FW_RI_INIT_P2PTYPE_READ_REQ; 1397 } 1398 } 1399 } else if (mpa->revision == 1) 1400 if (peer2peer) 1401 ep->mpa_attr.p2p_type = p2p_type; 1402 1403 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " 1404 "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__, 1405 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 1406 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 1407 ep->mpa_attr.p2p_type); 1408 1409 state_set(&ep->com, MPA_REQ_RCVD); 1410 1411 /* drive upcall */ 1412 connect_request_upcall(ep); 1413 return; 1414 } 1415 1416 static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) 1417 { 1418 struct c4iw_ep *ep; 1419 struct cpl_rx_data *hdr = cplhdr(skb); 1420 unsigned int dlen = ntohs(hdr->len); 1421 unsigned int tid = GET_TID(hdr); 1422 struct tid_info *t = dev->rdev.lldi.tids; 1423 __u8 status = hdr->status; 1424 1425 ep = lookup_tid(t, tid); 1426 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen); 1427 skb_pull(skb, sizeof(*hdr)); 1428 skb_trim(skb, dlen); 1429 1430 /* update RX credits */ 1431 update_rx_credits(ep, dlen); 1432 1433 switch (state_read(&ep->com)) { 1434 case MPA_REQ_SENT: 1435 ep->rcv_seq += dlen; 1436 process_mpa_reply(ep, skb); 1437 break; 1438 case MPA_REQ_WAIT: 1439 ep->rcv_seq += dlen; 1440 process_mpa_request(ep, skb); 1441 break; 1442 case FPDU_MODE: { 1443 struct c4iw_qp_attributes attrs; 1444 BUG_ON(!ep->com.qp); 1445 if (status) 1446 pr_err("%s Unexpected streaming data." 
\ 1447 " qpid %u ep %p state %d tid %u status %d\n", 1448 __func__, ep->com.qp->wq.sq.qid, ep, 1449 state_read(&ep->com), ep->hwtid, status); 1450 attrs.next_state = C4IW_QP_STATE_ERROR; 1451 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1452 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1453 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); 1454 break; 1455 } 1456 default: 1457 break; 1458 } 1459 return 0; 1460 } 1461 1462 static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1463 { 1464 struct c4iw_ep *ep; 1465 struct cpl_abort_rpl_rss *rpl = cplhdr(skb); 1466 int release = 0; 1467 unsigned int tid = GET_TID(rpl); 1468 struct tid_info *t = dev->rdev.lldi.tids; 1469 1470 ep = lookup_tid(t, tid); 1471 if (!ep) { 1472 printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n"); 1473 return 0; 1474 } 1475 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1476 mutex_lock(&ep->com.mutex); 1477 switch (ep->com.state) { 1478 case ABORTING: 1479 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 1480 __state_set(&ep->com, DEAD); 1481 release = 1; 1482 break; 1483 default: 1484 printk(KERN_ERR "%s ep %p state %d\n", 1485 __func__, ep, ep->com.state); 1486 break; 1487 } 1488 mutex_unlock(&ep->com.mutex); 1489 1490 if (release) 1491 release_ep_resources(ep); 1492 return 0; 1493 } 1494 1495 static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) 1496 { 1497 struct sk_buff *skb; 1498 struct fw_ofld_connection_wr *req; 1499 unsigned int mtu_idx; 1500 int wscale; 1501 1502 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 1503 req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req)); 1504 memset(req, 0, sizeof(*req)); 1505 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR)); 1506 req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); 1507 req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, 1508 ep->l2t)); 1509 req->le.lport = ep->com.local_addr.sin_port; 1510 req->le.pport = ep->com.remote_addr.sin_port; 1511 req->le.u.ipv4.lip = ep->com.local_addr.sin_addr.s_addr; 1512 req->le.u.ipv4.pip = ep->com.remote_addr.sin_addr.s_addr; 1513 req->tcb.t_state_to_astid = 1514 htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) | 1515 V_FW_OFLD_CONNECTION_WR_ASTID(atid)); 1516 req->tcb.cplrxdataack_cplpassacceptrpl = 1517 htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK); 1518 req->tcb.tx_max = (__force __be32) jiffies; 1519 req->tcb.rcv_adv = htons(1); 1520 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 1521 wscale = compute_wscale(rcv_win); 1522 req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) | 1523 (nocong ? 
NO_CONG(1) : 0) | 1524 KEEP_ALIVE(1) | 1525 DELACK(1) | 1526 WND_SCALE(wscale) | 1527 MSS_IDX(mtu_idx) | 1528 L2T_IDX(ep->l2t->idx) | 1529 TX_CHAN(ep->tx_chan) | 1530 SMAC_SEL(ep->smac_idx) | 1531 DSCP(ep->tos) | 1532 ULP_MODE(ULP_MODE_TCPDDP) | 1533 RCV_BUFSIZ(rcv_win >> 10)); 1534 req->tcb.opt2 = (__force __be32) (PACE(1) | 1535 TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) | 1536 RX_CHANNEL(0) | 1537 CCTRL_ECN(enable_ecn) | 1538 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid)); 1539 if (enable_tcp_timestamps) 1540 req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1); 1541 if (enable_tcp_sack) 1542 req->tcb.opt2 |= (__force __be32) SACK_EN(1); 1543 if (wscale && enable_tcp_window_scaling) 1544 req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1); 1545 req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0); 1546 req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2); 1547 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx); 1548 set_bit(ACT_OFLD_CONN, &ep->com.history); 1549 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1550 } 1551 1552 /* 1553 * Return whether a failed active open has allocated a TID 1554 */ 1555 static inline int act_open_has_tid(int status) 1556 { 1557 return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && 1558 status != CPL_ERR_ARP_MISS; 1559 } 1560 1561 #define ACT_OPEN_RETRY_COUNT 2 1562 1563 static int c4iw_reconnect(struct c4iw_ep *ep) 1564 { 1565 int err = 0; 1566 struct rtable *rt; 1567 struct port_info *pi; 1568 struct net_device *pdev; 1569 int step; 1570 struct neighbour *neigh; 1571 1572 PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); 1573 init_timer(&ep->timer); 1574 1575 /* 1576 * Allocate an active TID to initiate a TCP connection. 1577 */ 1578 ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); 1579 if (ep->atid == -1) { 1580 pr_err("%s - cannot alloc atid.\n", __func__); 1581 err = -ENOMEM; 1582 goto fail2; 1583 } 1584 insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid); 1585 1586 /* find a route */ 1587 rt = find_route(ep->com.dev, 1588 ep->com.cm_id->local_addr.sin_addr.s_addr, 1589 ep->com.cm_id->remote_addr.sin_addr.s_addr, 1590 ep->com.cm_id->local_addr.sin_port, 1591 ep->com.cm_id->remote_addr.sin_port, 0); 1592 if (!rt) { 1593 pr_err("%s - cannot find route.\n", __func__); 1594 err = -EHOSTUNREACH; 1595 goto fail3; 1596 } 1597 ep->dst = &rt->dst; 1598 1599 neigh = dst_neigh_lookup(ep->dst, 1600 &ep->com.cm_id->remote_addr.sin_addr.s_addr); 1601 if (!neigh) { 1602 pr_err("%s - cannot alloc neigh.\n", __func__); 1603 err = -ENOMEM; 1604 goto fail4; 1605 } 1606 1607 /* get a l2t entry */ 1608 if (neigh->dev->flags & IFF_LOOPBACK) { 1609 PDBG("%s LOOPBACK\n", __func__); 1610 pdev = ip_dev_find(&init_net, 1611 ep->com.cm_id->remote_addr.sin_addr.s_addr); 1612 ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, 1613 neigh, pdev, 0); 1614 pi = (struct port_info *)netdev_priv(pdev); 1615 ep->mtu = pdev->mtu; 1616 ep->tx_chan = cxgb4_port_chan(pdev); 1617 ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; 1618 dev_put(pdev); 1619 } else { 1620 ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, 1621 neigh, neigh->dev, 0); 1622 pi = (struct port_info *)netdev_priv(neigh->dev); 1623 ep->mtu = dst_mtu(ep->dst); 1624 ep->tx_chan = cxgb4_port_chan(neigh->dev); 1625 ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 1626 0x7F) << 1; 1627 } 1628 1629 step = ep->com.dev->rdev.lldi.ntxq / ep->com.dev->rdev.lldi.nchan; 1630 ep->txq_idx = pi->port_id * step; 1631 ep->ctrlq_idx = pi->port_id; 1632 step = 
ep->com.dev->rdev.lldi.nrxq / ep->com.dev->rdev.lldi.nchan; 1633 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[pi->port_id * step]; 1634 1635 if (!ep->l2t) { 1636 pr_err("%s - cannot alloc l2e.\n", __func__); 1637 err = -ENOMEM; 1638 goto fail4; 1639 } 1640 1641 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 1642 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 1643 ep->l2t->idx); 1644 1645 state_set(&ep->com, CONNECTING); 1646 ep->tos = 0; 1647 1648 /* send connect request to rnic */ 1649 err = send_connect(ep); 1650 if (!err) 1651 goto out; 1652 1653 cxgb4_l2t_release(ep->l2t); 1654 fail4: 1655 dst_release(ep->dst); 1656 fail3: 1657 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 1658 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 1659 fail2: 1660 /* 1661 * remember to send notification to upper layer. 1662 * We are in here so the upper layer is not aware that this is 1663 * re-connect attempt and so, upper layer is still waiting for 1664 * response of 1st connect request. 1665 */ 1666 connect_reply_upcall(ep, -ECONNRESET); 1667 c4iw_put_ep(&ep->com); 1668 out: 1669 return err; 1670 } 1671 1672 static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1673 { 1674 struct c4iw_ep *ep; 1675 struct cpl_act_open_rpl *rpl = cplhdr(skb); 1676 unsigned int atid = GET_TID_TID(GET_AOPEN_ATID( 1677 ntohl(rpl->atid_status))); 1678 struct tid_info *t = dev->rdev.lldi.tids; 1679 int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status)); 1680 1681 ep = lookup_atid(t, atid); 1682 1683 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, 1684 status, status2errno(status)); 1685 1686 if (status == CPL_ERR_RTX_NEG_ADVICE) { 1687 printk(KERN_WARNING MOD "Connection problems for atid %u\n", 1688 atid); 1689 return 0; 1690 } 1691 1692 set_bit(ACT_OPEN_RPL, &ep->com.history); 1693 1694 /* 1695 * Log interesting failures. 
1696 */ 1697 switch (status) { 1698 case CPL_ERR_CONN_RESET: 1699 case CPL_ERR_CONN_TIMEDOUT: 1700 break; 1701 case CPL_ERR_TCAM_FULL: 1702 dev->rdev.stats.tcam_full++; 1703 if (dev->rdev.lldi.enable_fw_ofld_conn) { 1704 mutex_lock(&dev->rdev.stats.lock); 1705 mutex_unlock(&dev->rdev.stats.lock); 1706 send_fw_act_open_req(ep, 1707 GET_TID_TID(GET_AOPEN_ATID( 1708 ntohl(rpl->atid_status)))); 1709 return 0; 1710 } 1711 break; 1712 case CPL_ERR_CONN_EXIST: 1713 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 1714 set_bit(ACT_RETRY_INUSE, &ep->com.history); 1715 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, 1716 atid); 1717 cxgb4_free_atid(t, atid); 1718 dst_release(ep->dst); 1719 cxgb4_l2t_release(ep->l2t); 1720 c4iw_reconnect(ep); 1721 return 0; 1722 } 1723 break; 1724 default: 1725 printk(KERN_INFO MOD "Active open failure - " 1726 "atid %u status %u errno %d %pI4:%u->%pI4:%u\n", 1727 atid, status, status2errno(status), 1728 &ep->com.local_addr.sin_addr.s_addr, 1729 ntohs(ep->com.local_addr.sin_port), 1730 &ep->com.remote_addr.sin_addr.s_addr, 1731 ntohs(ep->com.remote_addr.sin_port)); 1732 break; 1733 } 1734 1735 connect_reply_upcall(ep, status2errno(status)); 1736 state_set(&ep->com, DEAD); 1737 1738 if (status && act_open_has_tid(status)) 1739 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); 1740 1741 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); 1742 cxgb4_free_atid(t, atid); 1743 dst_release(ep->dst); 1744 cxgb4_l2t_release(ep->l2t); 1745 c4iw_put_ep(&ep->com); 1746 1747 return 0; 1748 } 1749 1750 static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1751 { 1752 struct cpl_pass_open_rpl *rpl = cplhdr(skb); 1753 struct tid_info *t = dev->rdev.lldi.tids; 1754 unsigned int stid = GET_TID(rpl); 1755 struct c4iw_listen_ep *ep = lookup_stid(t, stid); 1756 1757 if (!ep) { 1758 PDBG("%s stid %d lookup failure!\n", __func__, stid); 1759 goto out; 1760 } 1761 PDBG("%s ep %p status %d error %d\n", __func__, ep, 1762 rpl->status, status2errno(rpl->status)); 1763 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); 1764 1765 out: 1766 return 0; 1767 } 1768 1769 static int listen_stop(struct c4iw_listen_ep *ep) 1770 { 1771 struct sk_buff *skb; 1772 struct cpl_close_listsvr_req *req; 1773 1774 PDBG("%s ep %p\n", __func__, ep); 1775 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 1776 if (!skb) { 1777 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); 1778 return -ENOMEM; 1779 } 1780 req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req)); 1781 INIT_TP_WR(req, 0); 1782 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, 1783 ep->stid)); 1784 req->reply_ctrl = cpu_to_be16( 1785 QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0])); 1786 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); 1787 return c4iw_ofld_send(&ep->com.dev->rdev, skb); 1788 } 1789 1790 static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1791 { 1792 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); 1793 struct tid_info *t = dev->rdev.lldi.tids; 1794 unsigned int stid = GET_TID(rpl); 1795 struct c4iw_listen_ep *ep = lookup_stid(t, stid); 1796 1797 PDBG("%s ep %p\n", __func__, ep); 1798 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); 1799 return 0; 1800 } 1801 1802 static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb, 1803 struct cpl_pass_accept_req *req) 1804 { 1805 struct cpl_pass_accept_rpl *rpl; 1806 unsigned int mtu_idx; 1807 u64 opt0; 1808 u32 opt2; 1809 int wscale; 1810 1811 PDBG("%s ep %p tid %u\n", __func__, 
ep, ep->hwtid); 1812 BUG_ON(skb_cloned(skb)); 1813 skb_trim(skb, sizeof(*rpl)); 1814 skb_get(skb); 1815 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 1816 wscale = compute_wscale(rcv_win); 1817 opt0 = (nocong ? NO_CONG(1) : 0) | 1818 KEEP_ALIVE(1) | 1819 DELACK(1) | 1820 WND_SCALE(wscale) | 1821 MSS_IDX(mtu_idx) | 1822 L2T_IDX(ep->l2t->idx) | 1823 TX_CHAN(ep->tx_chan) | 1824 SMAC_SEL(ep->smac_idx) | 1825 DSCP(ep->tos >> 2) | 1826 ULP_MODE(ULP_MODE_TCPDDP) | 1827 RCV_BUFSIZ(rcv_win>>10); 1828 opt2 = RX_CHANNEL(0) | 1829 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 1830 1831 if (enable_tcp_timestamps && req->tcpopt.tstamp) 1832 opt2 |= TSTAMPS_EN(1); 1833 if (enable_tcp_sack && req->tcpopt.sack) 1834 opt2 |= SACK_EN(1); 1835 if (wscale && enable_tcp_window_scaling) 1836 opt2 |= WND_SCALE_EN(1); 1837 if (enable_ecn) { 1838 const struct tcphdr *tcph; 1839 u32 hlen = ntohl(req->hdr_len); 1840 1841 tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) + 1842 G_IP_HDR_LEN(hlen); 1843 if (tcph->ece && tcph->cwr) 1844 opt2 |= CCTRL_ECN(1); 1845 } 1846 1847 rpl = cplhdr(skb); 1848 INIT_TP_WR(rpl, ep->hwtid); 1849 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, 1850 ep->hwtid)); 1851 rpl->opt0 = cpu_to_be64(opt0); 1852 rpl->opt2 = cpu_to_be32(opt2); 1853 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 1854 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1855 1856 return; 1857 } 1858 1859 static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip, 1860 struct sk_buff *skb) 1861 { 1862 PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid, 1863 peer_ip); 1864 BUG_ON(skb_cloned(skb)); 1865 skb_trim(skb, sizeof(struct cpl_tid_release)); 1866 skb_get(skb); 1867 release_tid(&dev->rdev, hwtid, skb); 1868 return; 1869 } 1870 1871 static void get_4tuple(struct cpl_pass_accept_req *req, 1872 __be32 *local_ip, __be32 *peer_ip, 1873 __be16 *local_port, __be16 *peer_port) 1874 { 1875 int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len)); 1876 int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len)); 1877 struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len); 1878 struct tcphdr *tcp = (struct tcphdr *) 1879 ((u8 *)(req + 1) + eth_len + ip_len); 1880 1881 PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__, 1882 ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source), 1883 ntohs(tcp->dest)); 1884 1885 *peer_ip = ip->saddr; 1886 *local_ip = ip->daddr; 1887 *peer_port = tcp->source; 1888 *local_port = tcp->dest; 1889 1890 return; 1891 } 1892 1893 static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst, 1894 struct c4iw_dev *cdev, bool clear_mpa_v1) 1895 { 1896 struct neighbour *n; 1897 int err, step; 1898 1899 n = dst_neigh_lookup(dst, &peer_ip); 1900 if (!n) 1901 return -ENODEV; 1902 1903 rcu_read_lock(); 1904 err = -ENOMEM; 1905 if (n->dev->flags & IFF_LOOPBACK) { 1906 struct net_device *pdev; 1907 1908 pdev = ip_dev_find(&init_net, peer_ip); 1909 if (!pdev) { 1910 err = -ENODEV; 1911 goto out; 1912 } 1913 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, 1914 n, pdev, 0); 1915 if (!ep->l2t) 1916 goto out; 1917 ep->mtu = pdev->mtu; 1918 ep->tx_chan = cxgb4_port_chan(pdev); 1919 ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; 1920 step = cdev->rdev.lldi.ntxq / 1921 cdev->rdev.lldi.nchan; 1922 ep->txq_idx = cxgb4_port_idx(pdev) * step; 1923 step = cdev->rdev.lldi.nrxq / 1924 cdev->rdev.lldi.nchan; 1925 ep->ctrlq_idx = cxgb4_port_idx(pdev); 1926 ep->rss_qid = cdev->rdev.lldi.rxq_ids[ 1927 cxgb4_port_idx(pdev) * step]; 1928 
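/*
 * ip_dev_find() took a reference on pdev; it is released below once the
 * loopback port's tx channel, SMAC index and queue ids have been
 * recorded in the endpoint.
 */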
dev_put(pdev); 1929 } else { 1930 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, 1931 n, n->dev, 0); 1932 if (!ep->l2t) 1933 goto out; 1934 ep->mtu = dst_mtu(dst); 1935 ep->tx_chan = cxgb4_port_chan(n->dev); 1936 ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1; 1937 step = cdev->rdev.lldi.ntxq / 1938 cdev->rdev.lldi.nchan; 1939 ep->txq_idx = cxgb4_port_idx(n->dev) * step; 1940 ep->ctrlq_idx = cxgb4_port_idx(n->dev); 1941 step = cdev->rdev.lldi.nrxq / 1942 cdev->rdev.lldi.nchan; 1943 ep->rss_qid = cdev->rdev.lldi.rxq_ids[ 1944 cxgb4_port_idx(n->dev) * step]; 1945 1946 if (clear_mpa_v1) { 1947 ep->retry_with_mpa_v1 = 0; 1948 ep->tried_with_mpa_v1 = 0; 1949 } 1950 } 1951 err = 0; 1952 out: 1953 rcu_read_unlock(); 1954 1955 neigh_release(n); 1956 1957 return err; 1958 } 1959 1960 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) 1961 { 1962 struct c4iw_ep *child_ep = NULL, *parent_ep; 1963 struct cpl_pass_accept_req *req = cplhdr(skb); 1964 unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid)); 1965 struct tid_info *t = dev->rdev.lldi.tids; 1966 unsigned int hwtid = GET_TID(req); 1967 struct dst_entry *dst; 1968 struct rtable *rt; 1969 __be32 local_ip, peer_ip = 0; 1970 __be16 local_port, peer_port; 1971 int err; 1972 u16 peer_mss = ntohs(req->tcpopt.mss); 1973 1974 parent_ep = lookup_stid(t, stid); 1975 if (!parent_ep) { 1976 PDBG("%s connect request on invalid stid %d\n", __func__, stid); 1977 goto reject; 1978 } 1979 get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port); 1980 1981 PDBG("%s parent ep %p hwtid %u laddr 0x%x raddr 0x%x lport %d " \ 1982 "rport %d peer_mss %d\n", __func__, parent_ep, hwtid, 1983 ntohl(local_ip), ntohl(peer_ip), ntohs(local_port), 1984 ntohs(peer_port), peer_mss); 1985 1986 if (state_read(&parent_ep->com) != LISTEN) { 1987 printk(KERN_ERR "%s - listening ep not in LISTEN\n", 1988 __func__); 1989 goto reject; 1990 } 1991 1992 /* Find output route */ 1993 rt = find_route(dev, local_ip, peer_ip, local_port, peer_port, 1994 GET_POPEN_TOS(ntohl(req->tos_stid))); 1995 if (!rt) { 1996 printk(KERN_ERR MOD "%s - failed to find dst entry!\n", 1997 __func__); 1998 goto reject; 1999 } 2000 dst = &rt->dst; 2001 2002 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); 2003 if (!child_ep) { 2004 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", 2005 __func__); 2006 dst_release(dst); 2007 goto reject; 2008 } 2009 2010 err = import_ep(child_ep, peer_ip, dst, dev, false); 2011 if (err) { 2012 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", 2013 __func__); 2014 dst_release(dst); 2015 kfree(child_ep); 2016 goto reject; 2017 } 2018 2019 if (peer_mss && child_ep->mtu > (peer_mss + 40)) 2020 child_ep->mtu = peer_mss + 40; 2021 2022 state_set(&child_ep->com, CONNECTING); 2023 child_ep->com.dev = dev; 2024 child_ep->com.cm_id = NULL; 2025 child_ep->com.local_addr.sin_family = PF_INET; 2026 child_ep->com.local_addr.sin_port = local_port; 2027 child_ep->com.local_addr.sin_addr.s_addr = local_ip; 2028 child_ep->com.remote_addr.sin_family = PF_INET; 2029 child_ep->com.remote_addr.sin_port = peer_port; 2030 child_ep->com.remote_addr.sin_addr.s_addr = peer_ip; 2031 c4iw_get_ep(&parent_ep->com); 2032 child_ep->parent_ep = parent_ep; 2033 child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid)); 2034 child_ep->dst = dst; 2035 child_ep->hwtid = hwtid; 2036 2037 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__, 2038 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid); 2039 2040 init_timer(&child_ep->timer); 2041 cxgb4_insert_tid(t, 
child_ep, hwtid); 2042 insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid); 2043 accept_cr(child_ep, peer_ip, skb, req); 2044 set_bit(PASS_ACCEPT_REQ, &child_ep->com.history); 2045 goto out; 2046 reject: 2047 reject_cr(dev, hwtid, peer_ip, skb); 2048 out: 2049 return 0; 2050 } 2051 2052 static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb) 2053 { 2054 struct c4iw_ep *ep; 2055 struct cpl_pass_establish *req = cplhdr(skb); 2056 struct tid_info *t = dev->rdev.lldi.tids; 2057 unsigned int tid = GET_TID(req); 2058 2059 ep = lookup_tid(t, tid); 2060 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2061 ep->snd_seq = be32_to_cpu(req->snd_isn); 2062 ep->rcv_seq = be32_to_cpu(req->rcv_isn); 2063 2064 PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid, 2065 ntohs(req->tcp_opt)); 2066 2067 set_emss(ep, ntohs(req->tcp_opt)); 2068 2069 dst_confirm(ep->dst); 2070 state_set(&ep->com, MPA_REQ_WAIT); 2071 start_ep_timer(ep); 2072 send_flowc(ep, skb); 2073 set_bit(PASS_ESTAB, &ep->com.history); 2074 2075 return 0; 2076 } 2077 2078 static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) 2079 { 2080 struct cpl_peer_close *hdr = cplhdr(skb); 2081 struct c4iw_ep *ep; 2082 struct c4iw_qp_attributes attrs; 2083 int disconnect = 1; 2084 int release = 0; 2085 struct tid_info *t = dev->rdev.lldi.tids; 2086 unsigned int tid = GET_TID(hdr); 2087 int ret; 2088 2089 ep = lookup_tid(t, tid); 2090 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2091 dst_confirm(ep->dst); 2092 2093 set_bit(PEER_CLOSE, &ep->com.history); 2094 mutex_lock(&ep->com.mutex); 2095 switch (ep->com.state) { 2096 case MPA_REQ_WAIT: 2097 __state_set(&ep->com, CLOSING); 2098 break; 2099 case MPA_REQ_SENT: 2100 __state_set(&ep->com, CLOSING); 2101 connect_reply_upcall(ep, -ECONNRESET); 2102 break; 2103 case MPA_REQ_RCVD: 2104 2105 /* 2106 * We're gonna mark this puppy DEAD, but keep 2107 * the reference on it until the ULP accepts or 2108 * rejects the CR. Also wake up anyone waiting 2109 * in rdma connection migration (see c4iw_accept_cr()). 
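 * The wake-up below hands -ECONNRESET to any thread blocked in
 * rdma_init()/rdma_fini() waiting on this endpoint's wr_wait.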
2110 */ 2111 __state_set(&ep->com, CLOSING); 2112 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 2113 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 2114 break; 2115 case MPA_REP_SENT: 2116 __state_set(&ep->com, CLOSING); 2117 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 2118 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 2119 break; 2120 case FPDU_MODE: 2121 start_ep_timer(ep); 2122 __state_set(&ep->com, CLOSING); 2123 attrs.next_state = C4IW_QP_STATE_CLOSING; 2124 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2125 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2126 if (ret != -ECONNRESET) { 2127 peer_close_upcall(ep); 2128 disconnect = 1; 2129 } 2130 break; 2131 case ABORTING: 2132 disconnect = 0; 2133 break; 2134 case CLOSING: 2135 __state_set(&ep->com, MORIBUND); 2136 disconnect = 0; 2137 break; 2138 case MORIBUND: 2139 stop_ep_timer(ep); 2140 if (ep->com.cm_id && ep->com.qp) { 2141 attrs.next_state = C4IW_QP_STATE_IDLE; 2142 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2143 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2144 } 2145 close_complete_upcall(ep); 2146 __state_set(&ep->com, DEAD); 2147 release = 1; 2148 disconnect = 0; 2149 break; 2150 case DEAD: 2151 disconnect = 0; 2152 break; 2153 default: 2154 BUG_ON(1); 2155 } 2156 mutex_unlock(&ep->com.mutex); 2157 if (disconnect) 2158 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 2159 if (release) 2160 release_ep_resources(ep); 2161 return 0; 2162 } 2163 2164 /* 2165 * Returns whether an ABORT_REQ_RSS message is a negative advice. 2166 */ 2167 static int is_neg_adv_abort(unsigned int status) 2168 { 2169 return status == CPL_ERR_RTX_NEG_ADVICE || 2170 status == CPL_ERR_PERSIST_NEG_ADVICE; 2171 } 2172 2173 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) 2174 { 2175 struct cpl_abort_req_rss *req = cplhdr(skb); 2176 struct c4iw_ep *ep; 2177 struct cpl_abort_rpl *rpl; 2178 struct sk_buff *rpl_skb; 2179 struct c4iw_qp_attributes attrs; 2180 int ret; 2181 int release = 0; 2182 struct tid_info *t = dev->rdev.lldi.tids; 2183 unsigned int tid = GET_TID(req); 2184 2185 ep = lookup_tid(t, tid); 2186 if (is_neg_adv_abort(req->status)) { 2187 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, 2188 ep->hwtid); 2189 return 0; 2190 } 2191 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, 2192 ep->com.state); 2193 set_bit(PEER_ABORT, &ep->com.history); 2194 2195 /* 2196 * Wake up any threads in rdma_init() or rdma_fini(). 2197 * However, this is not needed if com state is just 2198 * MPA_REQ_SENT 2199 */ 2200 if (ep->com.state != MPA_REQ_SENT) 2201 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 2202 2203 mutex_lock(&ep->com.mutex); 2204 switch (ep->com.state) { 2205 case CONNECTING: 2206 break; 2207 case MPA_REQ_WAIT: 2208 stop_ep_timer(ep); 2209 break; 2210 case MPA_REQ_SENT: 2211 stop_ep_timer(ep); 2212 if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1)) 2213 connect_reply_upcall(ep, -ECONNRESET); 2214 else { 2215 /* 2216 * we just don't send notification upwards because we 2217 * want to retry with mpa_v1 without upper layers even 2218 * knowing it. 2219 * 2220 * do some housekeeping so as to re-initiate the 2221 * connection 2222 */ 2223 PDBG("%s: mpa_rev=%d. 
Retrying with mpav1\n", __func__, 2224 mpa_rev); 2225 ep->retry_with_mpa_v1 = 1; 2226 } 2227 break; 2228 case MPA_REP_SENT: 2229 break; 2230 case MPA_REQ_RCVD: 2231 break; 2232 case MORIBUND: 2233 case CLOSING: 2234 stop_ep_timer(ep); 2235 /*FALLTHROUGH*/ 2236 case FPDU_MODE: 2237 if (ep->com.cm_id && ep->com.qp) { 2238 attrs.next_state = C4IW_QP_STATE_ERROR; 2239 ret = c4iw_modify_qp(ep->com.qp->rhp, 2240 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 2241 &attrs, 1); 2242 if (ret) 2243 printk(KERN_ERR MOD 2244 "%s - qp <- error failed!\n", 2245 __func__); 2246 } 2247 peer_abort_upcall(ep); 2248 break; 2249 case ABORTING: 2250 break; 2251 case DEAD: 2252 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); 2253 mutex_unlock(&ep->com.mutex); 2254 return 0; 2255 default: 2256 BUG_ON(1); 2257 break; 2258 } 2259 dst_confirm(ep->dst); 2260 if (ep->com.state != ABORTING) { 2261 __state_set(&ep->com, DEAD); 2262 /* we don't release if we want to retry with mpa_v1 */ 2263 if (!ep->retry_with_mpa_v1) 2264 release = 1; 2265 } 2266 mutex_unlock(&ep->com.mutex); 2267 2268 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); 2269 if (!rpl_skb) { 2270 printk(KERN_ERR MOD "%s - cannot allocate skb!\n", 2271 __func__); 2272 release = 1; 2273 goto out; 2274 } 2275 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 2276 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); 2277 INIT_TP_WR(rpl, ep->hwtid); 2278 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); 2279 rpl->cmd = CPL_ABORT_NO_RST; 2280 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); 2281 out: 2282 if (release) 2283 release_ep_resources(ep); 2284 else if (ep->retry_with_mpa_v1) { 2285 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid); 2286 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); 2287 dst_release(ep->dst); 2288 cxgb4_l2t_release(ep->l2t); 2289 c4iw_reconnect(ep); 2290 } 2291 2292 return 0; 2293 } 2294 2295 static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2296 { 2297 struct c4iw_ep *ep; 2298 struct c4iw_qp_attributes attrs; 2299 struct cpl_close_con_rpl *rpl = cplhdr(skb); 2300 int release = 0; 2301 struct tid_info *t = dev->rdev.lldi.tids; 2302 unsigned int tid = GET_TID(rpl); 2303 2304 ep = lookup_tid(t, tid); 2305 2306 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2307 BUG_ON(!ep); 2308 2309 /* The cm_id may be null if we failed to connect */ 2310 mutex_lock(&ep->com.mutex); 2311 switch (ep->com.state) { 2312 case CLOSING: 2313 __state_set(&ep->com, MORIBUND); 2314 break; 2315 case MORIBUND: 2316 stop_ep_timer(ep); 2317 if ((ep->com.cm_id) && (ep->com.qp)) { 2318 attrs.next_state = C4IW_QP_STATE_IDLE; 2319 c4iw_modify_qp(ep->com.qp->rhp, 2320 ep->com.qp, 2321 C4IW_QP_ATTR_NEXT_STATE, 2322 &attrs, 1); 2323 } 2324 close_complete_upcall(ep); 2325 __state_set(&ep->com, DEAD); 2326 release = 1; 2327 break; 2328 case ABORTING: 2329 case DEAD: 2330 break; 2331 default: 2332 BUG_ON(1); 2333 break; 2334 } 2335 mutex_unlock(&ep->com.mutex); 2336 if (release) 2337 release_ep_resources(ep); 2338 return 0; 2339 } 2340 2341 static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) 2342 { 2343 struct cpl_rdma_terminate *rpl = cplhdr(skb); 2344 struct tid_info *t = dev->rdev.lldi.tids; 2345 unsigned int tid = GET_TID(rpl); 2346 struct c4iw_ep *ep; 2347 struct c4iw_qp_attributes attrs; 2348 2349 ep = lookup_tid(t, tid); 2350 BUG_ON(!ep); 2351 2352 if (ep && ep->com.qp) { 2353 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, 2354 ep->com.qp->wq.sq.qid); 2355 
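/*
 * Move the QP to TERMINATE so the rest of the teardown is driven
 * through the normal QP state machine in c4iw_modify_qp().
 */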
attrs.next_state = C4IW_QP_STATE_TERMINATE; 2356 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2357 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2358 } else 2359 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); 2360 2361 return 0; 2362 } 2363 2364 /* 2365 * Upcall from the adapter indicating data has been transmitted. 2366 * For us its just the single MPA request or reply. We can now free 2367 * the skb holding the mpa message. 2368 */ 2369 static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) 2370 { 2371 struct c4iw_ep *ep; 2372 struct cpl_fw4_ack *hdr = cplhdr(skb); 2373 u8 credits = hdr->credits; 2374 unsigned int tid = GET_TID(hdr); 2375 struct tid_info *t = dev->rdev.lldi.tids; 2376 2377 2378 ep = lookup_tid(t, tid); 2379 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); 2380 if (credits == 0) { 2381 PDBG("%s 0 credit ack ep %p tid %u state %u\n", 2382 __func__, ep, ep->hwtid, state_read(&ep->com)); 2383 return 0; 2384 } 2385 2386 dst_confirm(ep->dst); 2387 if (ep->mpa_skb) { 2388 PDBG("%s last streaming msg ack ep %p tid %u state %u " 2389 "initiator %u freeing skb\n", __func__, ep, ep->hwtid, 2390 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0); 2391 kfree_skb(ep->mpa_skb); 2392 ep->mpa_skb = NULL; 2393 } 2394 return 0; 2395 } 2396 2397 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 2398 { 2399 int err; 2400 struct c4iw_ep *ep = to_ep(cm_id); 2401 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2402 2403 if (state_read(&ep->com) == DEAD) { 2404 c4iw_put_ep(&ep->com); 2405 return -ECONNRESET; 2406 } 2407 set_bit(ULP_REJECT, &ep->com.history); 2408 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 2409 if (mpa_rev == 0) 2410 abort_connection(ep, NULL, GFP_KERNEL); 2411 else { 2412 err = send_mpa_reject(ep, pdata, pdata_len); 2413 err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 2414 } 2415 c4iw_put_ep(&ep->com); 2416 return 0; 2417 } 2418 2419 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 2420 { 2421 int err; 2422 struct c4iw_qp_attributes attrs; 2423 enum c4iw_qp_attr_mask mask; 2424 struct c4iw_ep *ep = to_ep(cm_id); 2425 struct c4iw_dev *h = to_c4iw_dev(cm_id->device); 2426 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); 2427 2428 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2429 if (state_read(&ep->com) == DEAD) { 2430 err = -ECONNRESET; 2431 goto err; 2432 } 2433 2434 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 2435 BUG_ON(!qp); 2436 2437 set_bit(ULP_ACCEPT, &ep->com.history); 2438 if ((conn_param->ord > c4iw_max_read_depth) || 2439 (conn_param->ird > c4iw_max_read_depth)) { 2440 abort_connection(ep, NULL, GFP_KERNEL); 2441 err = -EINVAL; 2442 goto err; 2443 } 2444 2445 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 2446 if (conn_param->ord > ep->ird) { 2447 ep->ird = conn_param->ird; 2448 ep->ord = conn_param->ord; 2449 send_mpa_reject(ep, conn_param->private_data, 2450 conn_param->private_data_len); 2451 abort_connection(ep, NULL, GFP_KERNEL); 2452 err = -ENOMEM; 2453 goto err; 2454 } 2455 if (conn_param->ird > ep->ord) { 2456 if (!ep->ord) 2457 conn_param->ird = 1; 2458 else { 2459 abort_connection(ep, NULL, GFP_KERNEL); 2460 err = -ENOMEM; 2461 goto err; 2462 } 2463 } 2464 2465 } 2466 ep->ird = conn_param->ird; 2467 ep->ord = conn_param->ord; 2468 2469 if (ep->mpa_attr.version != 2) 2470 if (peer2peer && ep->ird == 0) 2471 ep->ird = 1; 2472 2473 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); 2474 2475 cm_id->add_ref(cm_id); 
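/*
 * The endpoint now owns a reference on the cm_id; it is dropped via
 * rem_ref() on the error path below or when the connection is finally
 * torn down.
 */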
2476 ep->com.cm_id = cm_id; 2477 ep->com.qp = qp; 2478 ref_qp(ep); 2479 2480 /* bind QP to EP and move to RTS */ 2481 attrs.mpa_attr = ep->mpa_attr; 2482 attrs.max_ird = ep->ird; 2483 attrs.max_ord = ep->ord; 2484 attrs.llp_stream_handle = ep; 2485 attrs.next_state = C4IW_QP_STATE_RTS; 2486 2487 /* bind QP and TID with INIT_WR */ 2488 mask = C4IW_QP_ATTR_NEXT_STATE | 2489 C4IW_QP_ATTR_LLP_STREAM_HANDLE | 2490 C4IW_QP_ATTR_MPA_ATTR | 2491 C4IW_QP_ATTR_MAX_IRD | 2492 C4IW_QP_ATTR_MAX_ORD; 2493 2494 err = c4iw_modify_qp(ep->com.qp->rhp, 2495 ep->com.qp, mask, &attrs, 1); 2496 if (err) 2497 goto err1; 2498 err = send_mpa_reply(ep, conn_param->private_data, 2499 conn_param->private_data_len); 2500 if (err) 2501 goto err1; 2502 2503 state_set(&ep->com, FPDU_MODE); 2504 established_upcall(ep); 2505 c4iw_put_ep(&ep->com); 2506 return 0; 2507 err1: 2508 ep->com.cm_id = NULL; 2509 cm_id->rem_ref(cm_id); 2510 err: 2511 c4iw_put_ep(&ep->com); 2512 return err; 2513 } 2514 2515 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 2516 { 2517 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 2518 struct c4iw_ep *ep; 2519 struct rtable *rt; 2520 int err = 0; 2521 2522 if ((conn_param->ord > c4iw_max_read_depth) || 2523 (conn_param->ird > c4iw_max_read_depth)) { 2524 err = -EINVAL; 2525 goto out; 2526 } 2527 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 2528 if (!ep) { 2529 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 2530 err = -ENOMEM; 2531 goto out; 2532 } 2533 init_timer(&ep->timer); 2534 ep->plen = conn_param->private_data_len; 2535 if (ep->plen) 2536 memcpy(ep->mpa_pkt + sizeof(struct mpa_message), 2537 conn_param->private_data, ep->plen); 2538 ep->ird = conn_param->ird; 2539 ep->ord = conn_param->ord; 2540 2541 if (peer2peer && ep->ord == 0) 2542 ep->ord = 1; 2543 2544 cm_id->add_ref(cm_id); 2545 ep->com.dev = dev; 2546 ep->com.cm_id = cm_id; 2547 ep->com.qp = get_qhp(dev, conn_param->qpn); 2548 BUG_ON(!ep->com.qp); 2549 ref_qp(ep); 2550 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, 2551 ep->com.qp, cm_id); 2552 2553 /* 2554 * Allocate an active TID to initiate a TCP connection. 
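 * The atid also serves as the lookup handle that maps hardware replies
 * for this active open back to the endpoint until a real hardware tid
 * is established.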
2555 */ 2556 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); 2557 if (ep->atid == -1) { 2558 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); 2559 err = -ENOMEM; 2560 goto fail2; 2561 } 2562 insert_handle(dev, &dev->atid_idr, ep, ep->atid); 2563 2564 PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__, 2565 ntohl(cm_id->local_addr.sin_addr.s_addr), 2566 ntohs(cm_id->local_addr.sin_port), 2567 ntohl(cm_id->remote_addr.sin_addr.s_addr), 2568 ntohs(cm_id->remote_addr.sin_port)); 2569 2570 /* find a route */ 2571 rt = find_route(dev, 2572 cm_id->local_addr.sin_addr.s_addr, 2573 cm_id->remote_addr.sin_addr.s_addr, 2574 cm_id->local_addr.sin_port, 2575 cm_id->remote_addr.sin_port, 0); 2576 if (!rt) { 2577 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); 2578 err = -EHOSTUNREACH; 2579 goto fail3; 2580 } 2581 ep->dst = &rt->dst; 2582 2583 err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr, 2584 ep->dst, ep->com.dev, true); 2585 if (err) { 2586 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 2587 goto fail4; 2588 } 2589 2590 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 2591 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 2592 ep->l2t->idx); 2593 2594 state_set(&ep->com, CONNECTING); 2595 ep->tos = 0; 2596 ep->com.local_addr = cm_id->local_addr; 2597 ep->com.remote_addr = cm_id->remote_addr; 2598 2599 /* send connect request to rnic */ 2600 err = send_connect(ep); 2601 if (!err) 2602 goto out; 2603 2604 cxgb4_l2t_release(ep->l2t); 2605 fail4: 2606 dst_release(ep->dst); 2607 fail3: 2608 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 2609 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 2610 fail2: 2611 cm_id->rem_ref(cm_id); 2612 c4iw_put_ep(&ep->com); 2613 out: 2614 return err; 2615 } 2616 2617 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) 2618 { 2619 int err = 0; 2620 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 2621 struct c4iw_listen_ep *ep; 2622 2623 might_sleep(); 2624 2625 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 2626 if (!ep) { 2627 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 2628 err = -ENOMEM; 2629 goto fail1; 2630 } 2631 PDBG("%s ep %p\n", __func__, ep); 2632 cm_id->add_ref(cm_id); 2633 ep->com.cm_id = cm_id; 2634 ep->com.dev = dev; 2635 ep->backlog = backlog; 2636 ep->com.local_addr = cm_id->local_addr; 2637 2638 /* 2639 * Allocate a server TID. 
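 * With enable_fw_ofld_conn a server-filter tid (sftid) is used, so
 * incoming SYNs are steered to the offload queue and handled by
 * rx_pkt(); otherwise a regular stid-backed hardware server is created.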
2640 */ 2641 if (dev->rdev.lldi.enable_fw_ofld_conn) 2642 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, PF_INET, ep); 2643 else 2644 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep); 2645 2646 if (ep->stid == -1) { 2647 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__); 2648 err = -ENOMEM; 2649 goto fail2; 2650 } 2651 insert_handle(dev, &dev->stid_idr, ep, ep->stid); 2652 state_set(&ep->com, LISTEN); 2653 if (dev->rdev.lldi.enable_fw_ofld_conn) { 2654 do { 2655 err = cxgb4_create_server_filter( 2656 ep->com.dev->rdev.lldi.ports[0], ep->stid, 2657 ep->com.local_addr.sin_addr.s_addr, 2658 ep->com.local_addr.sin_port, 2659 0, 2660 ep->com.dev->rdev.lldi.rxq_ids[0], 2661 0, 2662 0); 2663 if (err == -EBUSY) { 2664 set_current_state(TASK_UNINTERRUPTIBLE); 2665 schedule_timeout(usecs_to_jiffies(100)); 2666 } 2667 } while (err == -EBUSY); 2668 } else { 2669 c4iw_init_wr_wait(&ep->com.wr_wait); 2670 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], 2671 ep->stid, ep->com.local_addr.sin_addr.s_addr, 2672 ep->com.local_addr.sin_port, 2673 0, 2674 ep->com.dev->rdev.lldi.rxq_ids[0]); 2675 if (!err) 2676 err = c4iw_wait_for_reply(&ep->com.dev->rdev, 2677 &ep->com.wr_wait, 2678 0, 0, __func__); 2679 } 2680 if (!err) { 2681 cm_id->provider_data = ep; 2682 goto out; 2683 } 2684 pr_err("%s cxgb4_create_server/filter failed err %d " \ 2685 "stid %d laddr %08x lport %d\n", \ 2686 __func__, err, ep->stid, 2687 ntohl(ep->com.local_addr.sin_addr.s_addr), 2688 ntohs(ep->com.local_addr.sin_port)); 2689 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); 2690 fail2: 2691 cm_id->rem_ref(cm_id); 2692 c4iw_put_ep(&ep->com); 2693 fail1: 2694 out: 2695 return err; 2696 } 2697 2698 int c4iw_destroy_listen(struct iw_cm_id *cm_id) 2699 { 2700 int err; 2701 struct c4iw_listen_ep *ep = to_listen_ep(cm_id); 2702 2703 PDBG("%s ep %p\n", __func__, ep); 2704 2705 might_sleep(); 2706 state_set(&ep->com, DEAD); 2707 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn) { 2708 err = cxgb4_remove_server_filter( 2709 ep->com.dev->rdev.lldi.ports[0], ep->stid, 2710 ep->com.dev->rdev.lldi.rxq_ids[0], 0); 2711 } else { 2712 c4iw_init_wr_wait(&ep->com.wr_wait); 2713 err = listen_stop(ep); 2714 if (err) 2715 goto done; 2716 err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 2717 0, 0, __func__); 2718 } 2719 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid); 2720 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); 2721 done: 2722 cm_id->rem_ref(cm_id); 2723 c4iw_put_ep(&ep->com); 2724 return err; 2725 } 2726 2727 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) 2728 { 2729 int ret = 0; 2730 int close = 0; 2731 int fatal = 0; 2732 struct c4iw_rdev *rdev; 2733 2734 mutex_lock(&ep->com.mutex); 2735 2736 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, 2737 states[ep->com.state], abrupt); 2738 2739 rdev = &ep->com.dev->rdev; 2740 if (c4iw_fatal_error(rdev)) { 2741 fatal = 1; 2742 close_complete_upcall(ep); 2743 ep->com.state = DEAD; 2744 } 2745 switch (ep->com.state) { 2746 case MPA_REQ_WAIT: 2747 case MPA_REQ_SENT: 2748 case MPA_REQ_RCVD: 2749 case MPA_REP_SENT: 2750 case FPDU_MODE: 2751 close = 1; 2752 if (abrupt) 2753 ep->com.state = ABORTING; 2754 else { 2755 ep->com.state = CLOSING; 2756 start_ep_timer(ep); 2757 } 2758 set_bit(CLOSE_SENT, &ep->com.flags); 2759 break; 2760 case CLOSING: 2761 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { 2762 close = 1; 2763 if (abrupt) { 2764 stop_ep_timer(ep); 2765 ep->com.state = ABORTING; 2766 } 
else 2767 ep->com.state = MORIBUND; 2768 } 2769 break; 2770 case MORIBUND: 2771 case ABORTING: 2772 case DEAD: 2773 PDBG("%s ignoring disconnect ep %p state %u\n", 2774 __func__, ep, ep->com.state); 2775 break; 2776 default: 2777 BUG(); 2778 break; 2779 } 2780 2781 if (close) { 2782 if (abrupt) { 2783 set_bit(EP_DISC_ABORT, &ep->com.history); 2784 close_complete_upcall(ep); 2785 ret = send_abort(ep, NULL, gfp); 2786 } else { 2787 set_bit(EP_DISC_CLOSE, &ep->com.history); 2788 ret = send_halfclose(ep, gfp); 2789 } 2790 if (ret) 2791 fatal = 1; 2792 } 2793 mutex_unlock(&ep->com.mutex); 2794 if (fatal) 2795 release_ep_resources(ep); 2796 return ret; 2797 } 2798 2799 static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 2800 struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 2801 { 2802 struct c4iw_ep *ep; 2803 int atid = be32_to_cpu(req->tid); 2804 2805 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, 2806 (__force u32) req->tid); 2807 if (!ep) 2808 return; 2809 2810 switch (req->retval) { 2811 case FW_ENOMEM: 2812 set_bit(ACT_RETRY_NOMEM, &ep->com.history); 2813 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 2814 send_fw_act_open_req(ep, atid); 2815 return; 2816 } 2817 case FW_EADDRINUSE: 2818 set_bit(ACT_RETRY_INUSE, &ep->com.history); 2819 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 2820 send_fw_act_open_req(ep, atid); 2821 return; 2822 } 2823 break; 2824 default: 2825 pr_info("%s unexpected ofld conn wr retval %d\n", 2826 __func__, req->retval); 2827 break; 2828 } 2829 pr_err("active ofld_connect_wr failure %d atid %d\n", 2830 req->retval, atid); 2831 mutex_lock(&dev->rdev.stats.lock); 2832 dev->rdev.stats.act_ofld_conn_fails++; 2833 mutex_unlock(&dev->rdev.stats.lock); 2834 connect_reply_upcall(ep, status2errno(req->retval)); 2835 state_set(&ep->com, DEAD); 2836 remove_handle(dev, &dev->atid_idr, atid); 2837 cxgb4_free_atid(dev->rdev.lldi.tids, atid); 2838 dst_release(ep->dst); 2839 cxgb4_l2t_release(ep->l2t); 2840 c4iw_put_ep(&ep->com); 2841 } 2842 2843 static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 2844 struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 2845 { 2846 struct sk_buff *rpl_skb; 2847 struct cpl_pass_accept_req *cpl; 2848 int ret; 2849 2850 rpl_skb = (struct sk_buff *)(unsigned long)req->cookie; 2851 BUG_ON(!rpl_skb); 2852 if (req->retval) { 2853 PDBG("%s passive open failure %d\n", __func__, req->retval); 2854 mutex_lock(&dev->rdev.stats.lock); 2855 dev->rdev.stats.pas_ofld_conn_fails++; 2856 mutex_unlock(&dev->rdev.stats.lock); 2857 kfree_skb(rpl_skb); 2858 } else { 2859 cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb); 2860 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 2861 (__force u32) htonl( 2862 (__force u32) req->tid))); 2863 ret = pass_accept_req(dev, rpl_skb); 2864 if (!ret) 2865 kfree_skb(rpl_skb); 2866 } 2867 return; 2868 } 2869 2870 static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) 2871 { 2872 struct cpl_fw6_msg *rpl = cplhdr(skb); 2873 struct cpl_fw6_msg_ofld_connection_wr_rpl *req; 2874 2875 switch (rpl->type) { 2876 case FW6_TYPE_CQE: 2877 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); 2878 break; 2879 case FW6_TYPE_OFLD_CONNECTION_WR_RPL: 2880 req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data; 2881 switch (req->t_state) { 2882 case TCP_SYN_SENT: 2883 active_ofld_conn_reply(dev, skb, req); 2884 break; 2885 case TCP_SYN_RECV: 2886 passive_ofld_conn_reply(dev, skb, req); 2887 break; 2888 default: 2889 pr_err("%s unexpected ofld conn wr state 
%d\n", 2890 __func__, req->t_state); 2891 break; 2892 } 2893 break; 2894 } 2895 return 0; 2896 } 2897 2898 static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos) 2899 { 2900 u32 l2info; 2901 u16 vlantag, len, hdr_len, eth_hdr_len; 2902 u8 intf; 2903 struct cpl_rx_pkt *cpl = cplhdr(skb); 2904 struct cpl_pass_accept_req *req; 2905 struct tcp_options_received tmp_opt; 2906 struct c4iw_dev *dev; 2907 2908 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); 2909 /* Store values from cpl_rx_pkt in temporary location. */ 2910 vlantag = (__force u16) cpl->vlan; 2911 len = (__force u16) cpl->len; 2912 l2info = (__force u32) cpl->l2info; 2913 hdr_len = (__force u16) cpl->hdr_len; 2914 intf = cpl->iff; 2915 2916 __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header)); 2917 2918 /* 2919 * We need to parse the TCP options from SYN packet. 2920 * to generate cpl_pass_accept_req. 2921 */ 2922 memset(&tmp_opt, 0, sizeof(tmp_opt)); 2923 tcp_clear_options(&tmp_opt); 2924 tcp_parse_options(skb, &tmp_opt, 0, NULL); 2925 2926 req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req)); 2927 memset(req, 0, sizeof(*req)); 2928 req->l2info = cpu_to_be16(V_SYN_INTF(intf) | 2929 V_SYN_MAC_IDX(G_RX_MACIDX( 2930 (__force int) htonl(l2info))) | 2931 F_SYN_XACT_MATCH); 2932 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ? 2933 G_RX_ETHHDR_LEN((__force int) htonl(l2info)) : 2934 G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info)); 2935 req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN( 2936 (__force int) htonl(l2info))) | 2937 V_TCP_HDR_LEN(G_RX_TCPHDR_LEN( 2938 (__force int) htons(hdr_len))) | 2939 V_IP_HDR_LEN(G_RX_IPHDR_LEN( 2940 (__force int) htons(hdr_len))) | 2941 V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len))); 2942 req->vlan = (__force __be16) vlantag; 2943 req->len = (__force __be16) len; 2944 req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) | 2945 PASS_OPEN_TOS(tos)); 2946 req->tcpopt.mss = htons(tmp_opt.mss_clamp); 2947 if (tmp_opt.wscale_ok) 2948 req->tcpopt.wsf = tmp_opt.snd_wscale; 2949 req->tcpopt.tstamp = tmp_opt.saw_tstamp; 2950 if (tmp_opt.sack_ok) 2951 req->tcpopt.sack = 1; 2952 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0)); 2953 return; 2954 } 2955 2956 static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, 2957 __be32 laddr, __be16 lport, 2958 __be32 raddr, __be16 rport, 2959 u32 rcv_isn, u32 filter, u16 window, 2960 u32 rss_qid, u8 port_id) 2961 { 2962 struct sk_buff *req_skb; 2963 struct fw_ofld_connection_wr *req; 2964 struct cpl_pass_accept_req *cpl = cplhdr(skb); 2965 2966 req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL); 2967 req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req)); 2968 memset(req, 0, sizeof(*req)); 2969 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1)); 2970 req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); 2971 req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL); 2972 req->le.filter = (__force __be32) filter; 2973 req->le.lport = lport; 2974 req->le.pport = rport; 2975 req->le.u.ipv4.lip = laddr; 2976 req->le.u.ipv4.pip = raddr; 2977 req->tcb.rcv_nxt = htonl(rcv_isn + 1); 2978 req->tcb.rcv_adv = htons(window); 2979 req->tcb.t_state_to_astid = 2980 htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) | 2981 V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) | 2982 V_FW_OFLD_CONNECTION_WR_ASTID( 2983 GET_PASS_OPEN_TID(ntohl(cpl->tos_stid)))); 2984 2985 /* 2986 * We store the qid in opt2 which will be used by the firmware 2987 * 
to send us the wr response. 2988 */ 2989 req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid)); 2990 2991 /* 2992 * We initialize the MSS index in TCB to 0xF. 2993 * So that when driver sends cpl_pass_accept_rpl 2994 * TCB picks up the correct value. If this was 0 2995 * TP will ignore any value > 0 for MSS index. 2996 */ 2997 req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF)); 2998 req->cookie = (unsigned long)skb; 2999 3000 set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id); 3001 cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb); 3002 } 3003 3004 /* 3005 * Handler for CPL_RX_PKT message. Need to handle cpl_rx_pkt 3006 * messages when a filter is being used instead of server to 3007 * redirect a syn packet. When packets hit filter they are redirected 3008 * to the offload queue and driver tries to establish the connection 3009 * using firmware work request. 3010 */ 3011 static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) 3012 { 3013 int stid; 3014 unsigned int filter; 3015 struct ethhdr *eh = NULL; 3016 struct vlan_ethhdr *vlan_eh = NULL; 3017 struct iphdr *iph; 3018 struct tcphdr *tcph; 3019 struct rss_header *rss = (void *)skb->data; 3020 struct cpl_rx_pkt *cpl = (void *)skb->data; 3021 struct cpl_pass_accept_req *req = (void *)(rss + 1); 3022 struct l2t_entry *e; 3023 struct dst_entry *dst; 3024 struct rtable *rt; 3025 struct c4iw_ep *lep; 3026 u16 window; 3027 struct port_info *pi; 3028 struct net_device *pdev; 3029 u16 rss_qid, eth_hdr_len; 3030 int step; 3031 u32 tx_chan; 3032 struct neighbour *neigh; 3033 3034 /* Drop all non-SYN packets */ 3035 if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN))) 3036 goto reject; 3037 3038 /* 3039 * Drop all packets which did not hit the filter. 3040 * Unlikely to happen. 3041 */ 3042 if (!(rss->filter_hit && rss->filter_tid)) 3043 goto reject; 3044 3045 /* 3046 * Calculate the server tid from filter hit index from cpl_rx_pkt. 3047 */ 3048 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val) 3049 - dev->rdev.lldi.tids->sftid_base 3050 + dev->rdev.lldi.tids->nstids; 3051 3052 lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid); 3053 if (!lep) { 3054 PDBG("%s connect request on invalid stid %d\n", __func__, stid); 3055 goto reject; 3056 } 3057 3058 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ? 
3059 G_RX_ETHHDR_LEN(htonl(cpl->l2info)) : 3060 G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info)); 3061 if (eth_hdr_len == ETH_HLEN) { 3062 eh = (struct ethhdr *)(req + 1); 3063 iph = (struct iphdr *)(eh + 1); 3064 } else { 3065 vlan_eh = (struct vlan_ethhdr *)(req + 1); 3066 iph = (struct iphdr *)(vlan_eh + 1); 3067 skb->vlan_tci = ntohs(cpl->vlan); 3068 } 3069 3070 if (iph->version != 0x4) 3071 goto reject; 3072 3073 tcph = (struct tcphdr *)(iph + 1); 3074 skb_set_network_header(skb, (void *)iph - (void *)rss); 3075 skb_set_transport_header(skb, (void *)tcph - (void *)rss); 3076 skb_get(skb); 3077 3078 PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__, 3079 ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr), 3080 ntohs(tcph->source), iph->tos); 3081 3082 rt = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source, 3083 iph->tos); 3084 if (!rt) { 3085 pr_err("%s - failed to find dst entry!\n", 3086 __func__); 3087 goto reject; 3088 } 3089 dst = &rt->dst; 3090 neigh = dst_neigh_lookup_skb(dst, skb); 3091 3092 if (!neigh) { 3093 pr_err("%s - failed to allocate neigh!\n", 3094 __func__); 3095 goto free_dst; 3096 } 3097 3098 if (neigh->dev->flags & IFF_LOOPBACK) { 3099 pdev = ip_dev_find(&init_net, iph->daddr); 3100 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, 3101 pdev, 0); 3102 pi = (struct port_info *)netdev_priv(pdev); 3103 tx_chan = cxgb4_port_chan(pdev); 3104 dev_put(pdev); 3105 } else { 3106 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, 3107 neigh->dev, 0); 3108 pi = (struct port_info *)netdev_priv(neigh->dev); 3109 tx_chan = cxgb4_port_chan(neigh->dev); 3110 } 3111 if (!e) { 3112 pr_err("%s - failed to allocate l2t entry!\n", 3113 __func__); 3114 goto free_dst; 3115 } 3116 3117 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; 3118 rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step]; 3119 window = (__force u16) htons((__force u16)tcph->window); 3120 3121 /* Calculate filter portion for LE region. */ 3122 filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e)); 3123 3124 /* 3125 * Synthesize the cpl_pass_accept_req. We have everything except the 3126 * TID. Once firmware sends a reply with TID we update the TID field 3127 * in cpl and pass it through the regular cpl_pass_accept_req path. 3128 */ 3129 build_cpl_pass_accept_req(skb, stid, iph->tos); 3130 send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr, 3131 tcph->source, ntohl(tcph->seq), filter, window, 3132 rss_qid, pi->port_id); 3133 cxgb4_l2t_release(e); 3134 free_dst: 3135 dst_release(dst); 3136 reject: 3137 return 0; 3138 } 3139 3140 /* 3141 * These are the real handlers that are called from a 3142 * work queue.
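 * sched() stashes the c4iw_dev pointer in skb->cb and queues the skb on
 * rxq; process_work() dequeues it and dispatches here by CPL opcode,
 * freeing the skb when the handler returns 0.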
3143 */ 3144 static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = { 3145 [CPL_ACT_ESTABLISH] = act_establish, 3146 [CPL_ACT_OPEN_RPL] = act_open_rpl, 3147 [CPL_RX_DATA] = rx_data, 3148 [CPL_ABORT_RPL_RSS] = abort_rpl, 3149 [CPL_ABORT_RPL] = abort_rpl, 3150 [CPL_PASS_OPEN_RPL] = pass_open_rpl, 3151 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl, 3152 [CPL_PASS_ACCEPT_REQ] = pass_accept_req, 3153 [CPL_PASS_ESTABLISH] = pass_establish, 3154 [CPL_PEER_CLOSE] = peer_close, 3155 [CPL_ABORT_REQ_RSS] = peer_abort, 3156 [CPL_CLOSE_CON_RPL] = close_con_rpl, 3157 [CPL_RDMA_TERMINATE] = terminate, 3158 [CPL_FW4_ACK] = fw4_ack, 3159 [CPL_FW6_MSG] = deferred_fw6_msg, 3160 [CPL_RX_PKT] = rx_pkt 3161 }; 3162 3163 static void process_timeout(struct c4iw_ep *ep) 3164 { 3165 struct c4iw_qp_attributes attrs; 3166 int abort = 1; 3167 3168 mutex_lock(&ep->com.mutex); 3169 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, 3170 ep->com.state); 3171 set_bit(TIMEDOUT, &ep->com.history); 3172 switch (ep->com.state) { 3173 case MPA_REQ_SENT: 3174 __state_set(&ep->com, ABORTING); 3175 connect_reply_upcall(ep, -ETIMEDOUT); 3176 break; 3177 case MPA_REQ_WAIT: 3178 __state_set(&ep->com, ABORTING); 3179 break; 3180 case CLOSING: 3181 case MORIBUND: 3182 if (ep->com.cm_id && ep->com.qp) { 3183 attrs.next_state = C4IW_QP_STATE_ERROR; 3184 c4iw_modify_qp(ep->com.qp->rhp, 3185 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 3186 &attrs, 1); 3187 } 3188 __state_set(&ep->com, ABORTING); 3189 break; 3190 default: 3191 WARN(1, "%s unexpected state ep %p tid %u state %u\n", 3192 __func__, ep, ep->hwtid, ep->com.state); 3193 abort = 0; 3194 } 3195 mutex_unlock(&ep->com.mutex); 3196 if (abort) 3197 abort_connection(ep, NULL, GFP_KERNEL); 3198 c4iw_put_ep(&ep->com); 3199 } 3200 3201 static void process_timedout_eps(void) 3202 { 3203 struct c4iw_ep *ep; 3204 3205 spin_lock_irq(&timeout_lock); 3206 while (!list_empty(&timeout_list)) { 3207 struct list_head *tmp; 3208 3209 tmp = timeout_list.next; 3210 list_del(tmp); 3211 spin_unlock_irq(&timeout_lock); 3212 ep = list_entry(tmp, struct c4iw_ep, entry); 3213 process_timeout(ep); 3214 spin_lock_irq(&timeout_lock); 3215 } 3216 spin_unlock_irq(&timeout_lock); 3217 } 3218 3219 static void process_work(struct work_struct *work) 3220 { 3221 struct sk_buff *skb = NULL; 3222 struct c4iw_dev *dev; 3223 struct cpl_act_establish *rpl; 3224 unsigned int opcode; 3225 int ret; 3226 3227 while ((skb = skb_dequeue(&rxq))) { 3228 rpl = cplhdr(skb); 3229 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); 3230 opcode = rpl->ot.opcode; 3231 3232 BUG_ON(!work_handlers[opcode]); 3233 ret = work_handlers[opcode](dev, skb); 3234 if (!ret) 3235 kfree_skb(skb); 3236 } 3237 process_timedout_eps(); 3238 } 3239 3240 static DECLARE_WORK(skb_work, process_work); 3241 3242 static void ep_timeout(unsigned long arg) 3243 { 3244 struct c4iw_ep *ep = (struct c4iw_ep *)arg; 3245 int kickit = 0; 3246 3247 spin_lock(&timeout_lock); 3248 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { 3249 list_add_tail(&ep->entry, &timeout_list); 3250 kickit = 1; 3251 } 3252 spin_unlock(&timeout_lock); 3253 if (kickit) 3254 queue_work(workq, &skb_work); 3255 } 3256 3257 /* 3258 * All the CM events are handled on a work queue to have a safe context. 3259 */ 3260 static int sched(struct c4iw_dev *dev, struct sk_buff *skb) 3261 { 3262 3263 /* 3264 * Save dev in the skb->cb area. 3265 */ 3266 *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev; 3267 3268 /* 3269 * Queue the skb and schedule the worker thread. 
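 * process_work() frees the skb when the dispatched handler returns 0;
 * handlers that need to keep the skb either return non-zero or take
 * their own reference with skb_get().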
3270 */ 3271 skb_queue_tail(&rxq, skb); 3272 queue_work(workq, &skb_work); 3273 return 0; 3274 } 3275 3276 static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 3277 { 3278 struct cpl_set_tcb_rpl *rpl = cplhdr(skb); 3279 3280 if (rpl->status != CPL_ERR_NONE) { 3281 printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u " 3282 "for tid %u\n", rpl->status, GET_TID(rpl)); 3283 } 3284 kfree_skb(skb); 3285 return 0; 3286 } 3287 3288 static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) 3289 { 3290 struct cpl_fw6_msg *rpl = cplhdr(skb); 3291 struct c4iw_wr_wait *wr_waitp; 3292 int ret; 3293 3294 PDBG("%s type %u\n", __func__, rpl->type); 3295 3296 switch (rpl->type) { 3297 case FW6_TYPE_WR_RPL: 3298 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); 3299 wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; 3300 PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); 3301 if (wr_waitp) 3302 c4iw_wake_up(wr_waitp, ret ? -ret : 0); 3303 kfree_skb(skb); 3304 break; 3305 case FW6_TYPE_CQE: 3306 case FW6_TYPE_OFLD_CONNECTION_WR_RPL: 3307 sched(dev, skb); 3308 break; 3309 default: 3310 printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__, 3311 rpl->type); 3312 kfree_skb(skb); 3313 break; 3314 } 3315 return 0; 3316 } 3317 3318 static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) 3319 { 3320 struct cpl_abort_req_rss *req = cplhdr(skb); 3321 struct c4iw_ep *ep; 3322 struct tid_info *t = dev->rdev.lldi.tids; 3323 unsigned int tid = GET_TID(req); 3324 3325 ep = lookup_tid(t, tid); 3326 if (!ep) { 3327 printk(KERN_WARNING MOD 3328 "Abort on non-existent endpoint, tid %d\n", tid); 3329 kfree_skb(skb); 3330 return 0; 3331 } 3332 if (is_neg_adv_abort(req->status)) { 3333 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, 3334 ep->hwtid); 3335 kfree_skb(skb); 3336 return 0; 3337 } 3338 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, 3339 ep->com.state); 3340 3341 /* 3342 * Wake up any threads in rdma_init() or rdma_fini(). 3343 * However, if we are on MPAv2 and want to retry with MPAv1 3344 * then, don't wake up yet. 3345 */ 3346 if (mpa_rev == 2 && !ep->tried_with_mpa_v1) { 3347 if (ep->com.state != MPA_REQ_SENT) 3348 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 3349 } else 3350 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 3351 sched(dev, skb); 3352 return 0; 3353 } 3354 3355 /* 3356 * Most upcalls from the T4 Core go to sched() to 3357 * schedule the processing on a work queue. 3358 */ 3359 c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = { 3360 [CPL_ACT_ESTABLISH] = sched, 3361 [CPL_ACT_OPEN_RPL] = sched, 3362 [CPL_RX_DATA] = sched, 3363 [CPL_ABORT_RPL_RSS] = sched, 3364 [CPL_ABORT_RPL] = sched, 3365 [CPL_PASS_OPEN_RPL] = sched, 3366 [CPL_CLOSE_LISTSRV_RPL] = sched, 3367 [CPL_PASS_ACCEPT_REQ] = sched, 3368 [CPL_PASS_ESTABLISH] = sched, 3369 [CPL_PEER_CLOSE] = sched, 3370 [CPL_CLOSE_CON_RPL] = sched, 3371 [CPL_ABORT_REQ_RSS] = peer_abort_intr, 3372 [CPL_RDMA_TERMINATE] = sched, 3373 [CPL_FW4_ACK] = sched, 3374 [CPL_SET_TCB_RPL] = set_tcb_rpl, 3375 [CPL_FW6_MSG] = fw6_msg, 3376 [CPL_RX_PKT] = sched 3377 }; 3378 3379 int __init c4iw_cm_init(void) 3380 { 3381 spin_lock_init(&timeout_lock); 3382 skb_queue_head_init(&rxq); 3383 3384 workq = create_singlethread_workqueue("iw_cxgb4"); 3385 if (!workq) 3386 return -ENOMEM; 3387 3388 return 0; 3389 } 3390 3391 void __exit c4iw_cm_term(void) 3392 { 3393 WARN_ON(!list_empty(&timeout_list)); 3394 flush_workqueue(workq); 3395 destroy_workqueue(workq); 3396 } 3397