/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "iw_cxgb4.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int dack_mode;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");

int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
			  "1 is spec compliant. (default=1)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

/*
 * Arm (or re-arm) the endpoint timer.  A reference is held on the
 * endpoint while the timer is pending; stop_ep_timer() drops it.
 */
static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
		del_timer_sync(&ep->timer);
	} else
		c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (!timer_pending(&ep->timer)) {
		printk(KERN_ERR "%s timer stopped when it's not running! "
		       "ep %p state %u\n", __func__, ep, ep->com.state);
		WARN_ON(1);
		return;
	}
	del_timer_sync(&ep->timer);
	c4iw_put_ep(&ep->com);
}

static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}

/*
 * Compute the effective MSS for the connection: the MTU table entry less
 * 40 bytes of IP/TCP headers, and 12 more bytes if TCP timestamps are in
 * use, clamped to a minimum of 128 bytes.
 */
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
	ep->mss = ep->emss;
	if (GET_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
	     ep->mss, ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}

/*
 * Look up an IPv4 route for the connection 4-tuple.
 */
static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			.ip4_u = {
				.daddr = peer_ip,
				.saddr = local_ip,
				.tos = tos}
		},
		.proto = IPPROTO_TCP,
		.uli_u = {
			.ports = {
				.sport = local_port,
				.dport = peer_port}
		}
	};

	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
		return NULL;
	return rt;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_rdev *rdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(rdev, skb);
}

/*
 * Prime the firmware with this connection's parameters (channel, port,
 * ingress queue, initial sequence numbers, send buffer and MSS) via a
 * FLOWC work request.
 */
static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS(8));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	/* Pad WR to 16 byte boundary */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep,
ep->hwtid); 418 skb = get_skb(NULL, wrlen, gfp); 419 if (!skb) { 420 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); 421 return -ENOMEM; 422 } 423 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 424 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 425 req = (struct cpl_close_con_req *) skb_put(skb, wrlen); 426 memset(req, 0, wrlen); 427 INIT_TP_WR(req, ep->hwtid); 428 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, 429 ep->hwtid)); 430 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 431 } 432 433 static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) 434 { 435 struct cpl_abort_req *req; 436 int wrlen = roundup(sizeof *req, 16); 437 438 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 439 skb = get_skb(skb, wrlen, gfp); 440 if (!skb) { 441 printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 442 __func__); 443 return -ENOMEM; 444 } 445 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 446 t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure); 447 req = (struct cpl_abort_req *) skb_put(skb, wrlen); 448 memset(req, 0, wrlen); 449 INIT_TP_WR(req, ep->hwtid); 450 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); 451 req->cmd = CPL_ABORT_SEND_RST; 452 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 453 } 454 455 static int send_connect(struct c4iw_ep *ep) 456 { 457 struct cpl_act_open_req *req; 458 struct sk_buff *skb; 459 u64 opt0; 460 u32 opt2; 461 unsigned int mtu_idx; 462 int wscale; 463 int wrlen = roundup(sizeof *req, 16); 464 465 PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid); 466 467 skb = get_skb(NULL, wrlen, GFP_KERNEL); 468 if (!skb) { 469 printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 470 __func__); 471 return -ENOMEM; 472 } 473 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 474 475 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 476 wscale = compute_wscale(rcv_win); 477 opt0 = KEEP_ALIVE(1) | 478 DELACK(1) | 479 WND_SCALE(wscale) | 480 MSS_IDX(mtu_idx) | 481 L2T_IDX(ep->l2t->idx) | 482 TX_CHAN(ep->tx_chan) | 483 SMAC_SEL(ep->smac_idx) | 484 DSCP(ep->tos) | 485 RCV_BUFSIZ(rcv_win>>10); 486 opt2 = RX_CHANNEL(0) | 487 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 488 if (enable_tcp_timestamps) 489 opt2 |= TSTAMPS_EN(1); 490 if (enable_tcp_sack) 491 opt2 |= SACK_EN(1); 492 if (wscale && enable_tcp_window_scaling) 493 opt2 |= WND_SCALE_EN(1); 494 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); 495 496 req = (struct cpl_act_open_req *) skb_put(skb, wrlen); 497 INIT_TP_WR(req, 0); 498 OPCODE_TID(req) = cpu_to_be32( 499 MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid))); 500 req->local_port = ep->com.local_addr.sin_port; 501 req->peer_port = ep->com.remote_addr.sin_port; 502 req->local_ip = ep->com.local_addr.sin_addr.s_addr; 503 req->peer_ip = ep->com.remote_addr.sin_addr.s_addr; 504 req->opt0 = cpu_to_be64(opt0); 505 req->params = 0; 506 req->opt2 = cpu_to_be32(opt2); 507 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 508 } 509 510 static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb) 511 { 512 int mpalen, wrlen; 513 struct fw_ofld_tx_data_wr *req; 514 struct mpa_message *mpa; 515 516 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 517 518 BUG_ON(skb_cloned(skb)); 519 520 mpalen = sizeof(*mpa) + ep->plen; 521 wrlen = roundup(mpalen + sizeof *req, 16); 522 skb = get_skb(skb, wrlen, GFP_KERNEL); 523 if (!skb) { 524 connect_reply_upcall(ep, -ENOMEM); 525 return; 526 } 527 set_wr_txq(skb, 
CPL_PRIORITY_DATA, ep->txq_idx); 528 529 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); 530 memset(req, 0, wrlen); 531 req->op_to_immdlen = cpu_to_be32( 532 FW_WR_OP(FW_OFLD_TX_DATA_WR) | 533 FW_WR_COMPL(1) | 534 FW_WR_IMMDLEN(mpalen)); 535 req->flowid_len16 = cpu_to_be32( 536 FW_WR_FLOWID(ep->hwtid) | 537 FW_WR_LEN16(wrlen >> 4)); 538 req->plen = cpu_to_be32(mpalen); 539 req->tunnel_to_proxy = cpu_to_be32( 540 FW_OFLD_TX_DATA_WR_FLUSH(1) | 541 FW_OFLD_TX_DATA_WR_SHOVE(1)); 542 543 mpa = (struct mpa_message *)(req + 1); 544 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); 545 mpa->flags = (crc_enabled ? MPA_CRC : 0) | 546 (markers_enabled ? MPA_MARKERS : 0); 547 mpa->private_data_size = htons(ep->plen); 548 mpa->revision = mpa_rev; 549 550 if (ep->plen) 551 memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen); 552 553 /* 554 * Reference the mpa skb. This ensures the data area 555 * will remain in memory until the hw acks the tx. 556 * Function fw4_ack() will deref it. 557 */ 558 skb_get(skb); 559 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 560 BUG_ON(ep->mpa_skb); 561 ep->mpa_skb = skb; 562 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 563 start_ep_timer(ep); 564 state_set(&ep->com, MPA_REQ_SENT); 565 ep->mpa_attr.initiator = 1; 566 return; 567 } 568 569 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) 570 { 571 int mpalen, wrlen; 572 struct fw_ofld_tx_data_wr *req; 573 struct mpa_message *mpa; 574 struct sk_buff *skb; 575 576 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 577 578 mpalen = sizeof(*mpa) + plen; 579 wrlen = roundup(mpalen + sizeof *req, 16); 580 581 skb = get_skb(NULL, wrlen, GFP_KERNEL); 582 if (!skb) { 583 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); 584 return -ENOMEM; 585 } 586 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 587 588 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); 589 memset(req, 0, wrlen); 590 req->op_to_immdlen = cpu_to_be32( 591 FW_WR_OP(FW_OFLD_TX_DATA_WR) | 592 FW_WR_COMPL(1) | 593 FW_WR_IMMDLEN(mpalen)); 594 req->flowid_len16 = cpu_to_be32( 595 FW_WR_FLOWID(ep->hwtid) | 596 FW_WR_LEN16(wrlen >> 4)); 597 req->plen = cpu_to_be32(mpalen); 598 req->tunnel_to_proxy = cpu_to_be32( 599 FW_OFLD_TX_DATA_WR_FLUSH(1) | 600 FW_OFLD_TX_DATA_WR_SHOVE(1)); 601 602 mpa = (struct mpa_message *)(req + 1); 603 memset(mpa, 0, sizeof(*mpa)); 604 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 605 mpa->flags = MPA_REJECT; 606 mpa->revision = mpa_rev; 607 mpa->private_data_size = htons(plen); 608 if (plen) 609 memcpy(mpa->private_data, pdata, plen); 610 611 /* 612 * Reference the mpa skb again. This ensures the data area 613 * will remain in memory until the hw acks the tx. 614 * Function fw4_ack() will deref it. 
615 */ 616 skb_get(skb); 617 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 618 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 619 BUG_ON(ep->mpa_skb); 620 ep->mpa_skb = skb; 621 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 622 } 623 624 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) 625 { 626 int mpalen, wrlen; 627 struct fw_ofld_tx_data_wr *req; 628 struct mpa_message *mpa; 629 struct sk_buff *skb; 630 631 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 632 633 mpalen = sizeof(*mpa) + plen; 634 wrlen = roundup(mpalen + sizeof *req, 16); 635 636 skb = get_skb(NULL, wrlen, GFP_KERNEL); 637 if (!skb) { 638 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); 639 return -ENOMEM; 640 } 641 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 642 643 req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen); 644 memset(req, 0, wrlen); 645 req->op_to_immdlen = cpu_to_be32( 646 FW_WR_OP(FW_OFLD_TX_DATA_WR) | 647 FW_WR_COMPL(1) | 648 FW_WR_IMMDLEN(mpalen)); 649 req->flowid_len16 = cpu_to_be32( 650 FW_WR_FLOWID(ep->hwtid) | 651 FW_WR_LEN16(wrlen >> 4)); 652 req->plen = cpu_to_be32(mpalen); 653 req->tunnel_to_proxy = cpu_to_be32( 654 FW_OFLD_TX_DATA_WR_FLUSH(1) | 655 FW_OFLD_TX_DATA_WR_SHOVE(1)); 656 657 mpa = (struct mpa_message *)(req + 1); 658 memset(mpa, 0, sizeof(*mpa)); 659 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 660 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | 661 (markers_enabled ? MPA_MARKERS : 0); 662 mpa->revision = mpa_rev; 663 mpa->private_data_size = htons(plen); 664 if (plen) 665 memcpy(mpa->private_data, pdata, plen); 666 667 /* 668 * Reference the mpa skb. This ensures the data area 669 * will remain in memory until the hw acks the tx. 670 * Function fw4_ack() will deref it. 
671 */ 672 skb_get(skb); 673 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 674 ep->mpa_skb = skb; 675 state_set(&ep->com, MPA_REP_SENT); 676 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 677 } 678 679 static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) 680 { 681 struct c4iw_ep *ep; 682 struct cpl_act_establish *req = cplhdr(skb); 683 unsigned int tid = GET_TID(req); 684 unsigned int atid = GET_TID_TID(ntohl(req->tos_atid)); 685 struct tid_info *t = dev->rdev.lldi.tids; 686 687 ep = lookup_atid(t, atid); 688 689 PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid, 690 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); 691 692 dst_confirm(ep->dst); 693 694 /* setup the hwtid for this connection */ 695 ep->hwtid = tid; 696 cxgb4_insert_tid(t, ep, tid); 697 698 ep->snd_seq = be32_to_cpu(req->snd_isn); 699 ep->rcv_seq = be32_to_cpu(req->rcv_isn); 700 701 set_emss(ep, ntohs(req->tcp_opt)); 702 703 /* dealloc the atid */ 704 cxgb4_free_atid(t, atid); 705 706 /* start MPA negotiation */ 707 send_flowc(ep, NULL); 708 send_mpa_req(ep, skb); 709 710 return 0; 711 } 712 713 static void close_complete_upcall(struct c4iw_ep *ep) 714 { 715 struct iw_cm_event event; 716 717 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 718 memset(&event, 0, sizeof(event)); 719 event.event = IW_CM_EVENT_CLOSE; 720 if (ep->com.cm_id) { 721 PDBG("close complete delivered ep %p cm_id %p tid %u\n", 722 ep, ep->com.cm_id, ep->hwtid); 723 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 724 ep->com.cm_id->rem_ref(ep->com.cm_id); 725 ep->com.cm_id = NULL; 726 ep->com.qp = NULL; 727 } 728 } 729 730 static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) 731 { 732 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 733 close_complete_upcall(ep); 734 state_set(&ep->com, ABORTING); 735 return send_abort(ep, skb, gfp); 736 } 737 738 static void peer_close_upcall(struct c4iw_ep *ep) 739 { 740 struct iw_cm_event event; 741 742 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 743 memset(&event, 0, sizeof(event)); 744 event.event = IW_CM_EVENT_DISCONNECT; 745 if (ep->com.cm_id) { 746 PDBG("peer close delivered ep %p cm_id %p tid %u\n", 747 ep, ep->com.cm_id, ep->hwtid); 748 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 749 } 750 } 751 752 static void peer_abort_upcall(struct c4iw_ep *ep) 753 { 754 struct iw_cm_event event; 755 756 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 757 memset(&event, 0, sizeof(event)); 758 event.event = IW_CM_EVENT_CLOSE; 759 event.status = -ECONNRESET; 760 if (ep->com.cm_id) { 761 PDBG("abort delivered ep %p cm_id %p tid %u\n", ep, 762 ep->com.cm_id, ep->hwtid); 763 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 764 ep->com.cm_id->rem_ref(ep->com.cm_id); 765 ep->com.cm_id = NULL; 766 ep->com.qp = NULL; 767 } 768 } 769 770 static void connect_reply_upcall(struct c4iw_ep *ep, int status) 771 { 772 struct iw_cm_event event; 773 774 PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status); 775 memset(&event, 0, sizeof(event)); 776 event.event = IW_CM_EVENT_CONNECT_REPLY; 777 event.status = status; 778 event.local_addr = ep->com.local_addr; 779 event.remote_addr = ep->com.remote_addr; 780 781 if ((status == 0) || (status == -ECONNREFUSED)) { 782 event.private_data_len = ep->plen; 783 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); 784 } 785 786 PDBG("%s ep %p tid %u status %d\n", __func__, ep, 787 ep->hwtid, status); 788 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 789 790 
if (status < 0) { 791 ep->com.cm_id->rem_ref(ep->com.cm_id); 792 ep->com.cm_id = NULL; 793 ep->com.qp = NULL; 794 } 795 } 796 797 static void connect_request_upcall(struct c4iw_ep *ep) 798 { 799 struct iw_cm_event event; 800 801 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 802 memset(&event, 0, sizeof(event)); 803 event.event = IW_CM_EVENT_CONNECT_REQUEST; 804 event.local_addr = ep->com.local_addr; 805 event.remote_addr = ep->com.remote_addr; 806 event.private_data_len = ep->plen; 807 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); 808 event.provider_data = ep; 809 if (state_read(&ep->parent_ep->com) != DEAD) { 810 c4iw_get_ep(&ep->com); 811 ep->parent_ep->com.cm_id->event_handler( 812 ep->parent_ep->com.cm_id, 813 &event); 814 } 815 c4iw_put_ep(&ep->parent_ep->com); 816 ep->parent_ep = NULL; 817 } 818 819 static void established_upcall(struct c4iw_ep *ep) 820 { 821 struct iw_cm_event event; 822 823 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 824 memset(&event, 0, sizeof(event)); 825 event.event = IW_CM_EVENT_ESTABLISHED; 826 if (ep->com.cm_id) { 827 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 828 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 829 } 830 } 831 832 static int update_rx_credits(struct c4iw_ep *ep, u32 credits) 833 { 834 struct cpl_rx_data_ack *req; 835 struct sk_buff *skb; 836 int wrlen = roundup(sizeof *req, 16); 837 838 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); 839 skb = get_skb(NULL, wrlen, GFP_KERNEL); 840 if (!skb) { 841 printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n"); 842 return 0; 843 } 844 845 req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen); 846 memset(req, 0, wrlen); 847 INIT_TP_WR(req, ep->hwtid); 848 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, 849 ep->hwtid)); 850 req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) | 851 F_RX_DACK_CHANGE | 852 V_RX_DACK_MODE(dack_mode)); 853 set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx); 854 c4iw_ofld_send(&ep->com.dev->rdev, skb); 855 return credits; 856 } 857 858 static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) 859 { 860 struct mpa_message *mpa; 861 u16 plen; 862 struct c4iw_qp_attributes attrs; 863 enum c4iw_qp_attr_mask mask; 864 int err; 865 866 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 867 868 /* 869 * Stop mpa timer. If it expired, then the state has 870 * changed and we bail since ep_timeout already aborted 871 * the connection. 872 */ 873 stop_ep_timer(ep); 874 if (state_read(&ep->com) != MPA_REQ_SENT) 875 return; 876 877 /* 878 * If we get more than the supported amount of private data 879 * then we must fail this connection. 880 */ 881 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { 882 err = -EINVAL; 883 goto err; 884 } 885 886 /* 887 * copy the new data into our accumulation buffer. 888 */ 889 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 890 skb->len); 891 ep->mpa_pkt_len += skb->len; 892 893 /* 894 * if we don't even have the mpa message, then bail. 895 */ 896 if (ep->mpa_pkt_len < sizeof(*mpa)) 897 return; 898 mpa = (struct mpa_message *) ep->mpa_pkt; 899 900 /* Validate MPA header. */ 901 if (mpa->revision != mpa_rev) { 902 err = -EPROTO; 903 goto err; 904 } 905 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { 906 err = -EPROTO; 907 goto err; 908 } 909 910 plen = ntohs(mpa->private_data_size); 911 912 /* 913 * Fail if there's too much private data. 
914 */ 915 if (plen > MPA_MAX_PRIVATE_DATA) { 916 err = -EPROTO; 917 goto err; 918 } 919 920 /* 921 * If plen does not account for pkt size 922 */ 923 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 924 err = -EPROTO; 925 goto err; 926 } 927 928 ep->plen = (u8) plen; 929 930 /* 931 * If we don't have all the pdata yet, then bail. 932 * We'll continue process when more data arrives. 933 */ 934 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 935 return; 936 937 if (mpa->flags & MPA_REJECT) { 938 err = -ECONNREFUSED; 939 goto err; 940 } 941 942 /* 943 * If we get here we have accumulated the entire mpa 944 * start reply message including private data. And 945 * the MPA header is valid. 946 */ 947 state_set(&ep->com, FPDU_MODE); 948 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 949 ep->mpa_attr.recv_marker_enabled = markers_enabled; 950 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 951 ep->mpa_attr.version = mpa_rev; 952 ep->mpa_attr.p2p_type = peer2peer ? p2p_type : 953 FW_RI_INIT_P2PTYPE_DISABLED; 954 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " 955 "xmit_marker_enabled=%d, version=%d\n", __func__, 956 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 957 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version); 958 959 attrs.mpa_attr = ep->mpa_attr; 960 attrs.max_ird = ep->ird; 961 attrs.max_ord = ep->ord; 962 attrs.llp_stream_handle = ep; 963 attrs.next_state = C4IW_QP_STATE_RTS; 964 965 mask = C4IW_QP_ATTR_NEXT_STATE | 966 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | 967 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; 968 969 /* bind QP and TID with INIT_WR */ 970 err = c4iw_modify_qp(ep->com.qp->rhp, 971 ep->com.qp, mask, &attrs, 1); 972 if (err) 973 goto err; 974 goto out; 975 err: 976 state_set(&ep->com, ABORTING); 977 send_abort(ep, skb, GFP_KERNEL); 978 out: 979 connect_reply_upcall(ep, err); 980 return; 981 } 982 983 static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) 984 { 985 struct mpa_message *mpa; 986 u16 plen; 987 988 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 989 990 if (state_read(&ep->com) != MPA_REQ_WAIT) 991 return; 992 993 /* 994 * If we get more than the supported amount of private data 995 * then we must fail this connection. 996 */ 997 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { 998 stop_ep_timer(ep); 999 abort_connection(ep, skb, GFP_KERNEL); 1000 return; 1001 } 1002 1003 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); 1004 1005 /* 1006 * Copy the new data into our accumulation buffer. 1007 */ 1008 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 1009 skb->len); 1010 ep->mpa_pkt_len += skb->len; 1011 1012 /* 1013 * If we don't even have the mpa message, then bail. 1014 * We'll continue process when more data arrives. 1015 */ 1016 if (ep->mpa_pkt_len < sizeof(*mpa)) 1017 return; 1018 1019 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); 1020 stop_ep_timer(ep); 1021 mpa = (struct mpa_message *) ep->mpa_pkt; 1022 1023 /* 1024 * Validate MPA Header. 1025 */ 1026 if (mpa->revision != mpa_rev) { 1027 abort_connection(ep, skb, GFP_KERNEL); 1028 return; 1029 } 1030 1031 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) { 1032 abort_connection(ep, skb, GFP_KERNEL); 1033 return; 1034 } 1035 1036 plen = ntohs(mpa->private_data_size); 1037 1038 /* 1039 * Fail if there's too much private data. 
 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start request message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
				FW_RI_INIT_P2PTYPE_DISABLED;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}

static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %u\n",
		       __func__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
1121 */ 1122 break; 1123 } 1124 return 0; 1125 } 1126 1127 static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1128 { 1129 struct c4iw_ep *ep; 1130 struct cpl_abort_rpl_rss *rpl = cplhdr(skb); 1131 int release = 0; 1132 unsigned int tid = GET_TID(rpl); 1133 struct tid_info *t = dev->rdev.lldi.tids; 1134 1135 ep = lookup_tid(t, tid); 1136 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1137 BUG_ON(!ep); 1138 mutex_lock(&ep->com.mutex); 1139 switch (ep->com.state) { 1140 case ABORTING: 1141 __state_set(&ep->com, DEAD); 1142 release = 1; 1143 break; 1144 default: 1145 printk(KERN_ERR "%s ep %p state %d\n", 1146 __func__, ep, ep->com.state); 1147 break; 1148 } 1149 mutex_unlock(&ep->com.mutex); 1150 1151 if (release) 1152 release_ep_resources(ep); 1153 return 0; 1154 } 1155 1156 /* 1157 * Return whether a failed active open has allocated a TID 1158 */ 1159 static inline int act_open_has_tid(int status) 1160 { 1161 return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && 1162 status != CPL_ERR_ARP_MISS; 1163 } 1164 1165 static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1166 { 1167 struct c4iw_ep *ep; 1168 struct cpl_act_open_rpl *rpl = cplhdr(skb); 1169 unsigned int atid = GET_TID_TID(GET_AOPEN_ATID( 1170 ntohl(rpl->atid_status))); 1171 struct tid_info *t = dev->rdev.lldi.tids; 1172 int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status)); 1173 1174 ep = lookup_atid(t, atid); 1175 1176 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, 1177 status, status2errno(status)); 1178 1179 if (status == CPL_ERR_RTX_NEG_ADVICE) { 1180 printk(KERN_WARNING MOD "Connection problems for atid %u\n", 1181 atid); 1182 return 0; 1183 } 1184 1185 connect_reply_upcall(ep, status2errno(status)); 1186 state_set(&ep->com, DEAD); 1187 1188 if (status && act_open_has_tid(status)) 1189 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); 1190 1191 cxgb4_free_atid(t, atid); 1192 dst_release(ep->dst); 1193 cxgb4_l2t_release(ep->l2t); 1194 c4iw_put_ep(&ep->com); 1195 1196 return 0; 1197 } 1198 1199 static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1200 { 1201 struct cpl_pass_open_rpl *rpl = cplhdr(skb); 1202 struct tid_info *t = dev->rdev.lldi.tids; 1203 unsigned int stid = GET_TID(rpl); 1204 struct c4iw_listen_ep *ep = lookup_stid(t, stid); 1205 1206 if (!ep) { 1207 printk(KERN_ERR MOD "stid %d lookup failure!\n", stid); 1208 return 0; 1209 } 1210 PDBG("%s ep %p status %d error %d\n", __func__, ep, 1211 rpl->status, status2errno(rpl->status)); 1212 ep->com.wr_wait.ret = status2errno(rpl->status); 1213 ep->com.wr_wait.done = 1; 1214 wake_up(&ep->com.wr_wait.wait); 1215 1216 return 0; 1217 } 1218 1219 static int listen_stop(struct c4iw_listen_ep *ep) 1220 { 1221 struct sk_buff *skb; 1222 struct cpl_close_listsvr_req *req; 1223 1224 PDBG("%s ep %p\n", __func__, ep); 1225 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 1226 if (!skb) { 1227 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); 1228 return -ENOMEM; 1229 } 1230 req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req)); 1231 INIT_TP_WR(req, 0); 1232 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, 1233 ep->stid)); 1234 req->reply_ctrl = cpu_to_be16( 1235 QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0])); 1236 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); 1237 return c4iw_ofld_send(&ep->com.dev->rdev, skb); 1238 } 1239 1240 static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1241 { 1242 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); 
1243 struct tid_info *t = dev->rdev.lldi.tids; 1244 unsigned int stid = GET_TID(rpl); 1245 struct c4iw_listen_ep *ep = lookup_stid(t, stid); 1246 1247 PDBG("%s ep %p\n", __func__, ep); 1248 ep->com.wr_wait.ret = status2errno(rpl->status); 1249 ep->com.wr_wait.done = 1; 1250 wake_up(&ep->com.wr_wait.wait); 1251 return 0; 1252 } 1253 1254 static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb, 1255 struct cpl_pass_accept_req *req) 1256 { 1257 struct cpl_pass_accept_rpl *rpl; 1258 unsigned int mtu_idx; 1259 u64 opt0; 1260 u32 opt2; 1261 int wscale; 1262 1263 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1264 BUG_ON(skb_cloned(skb)); 1265 skb_trim(skb, sizeof(*rpl)); 1266 skb_get(skb); 1267 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 1268 wscale = compute_wscale(rcv_win); 1269 opt0 = KEEP_ALIVE(1) | 1270 DELACK(1) | 1271 WND_SCALE(wscale) | 1272 MSS_IDX(mtu_idx) | 1273 L2T_IDX(ep->l2t->idx) | 1274 TX_CHAN(ep->tx_chan) | 1275 SMAC_SEL(ep->smac_idx) | 1276 DSCP(ep->tos) | 1277 RCV_BUFSIZ(rcv_win>>10); 1278 opt2 = RX_CHANNEL(0) | 1279 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 1280 1281 if (enable_tcp_timestamps && req->tcpopt.tstamp) 1282 opt2 |= TSTAMPS_EN(1); 1283 if (enable_tcp_sack && req->tcpopt.sack) 1284 opt2 |= SACK_EN(1); 1285 if (wscale && enable_tcp_window_scaling) 1286 opt2 |= WND_SCALE_EN(1); 1287 1288 rpl = cplhdr(skb); 1289 INIT_TP_WR(rpl, ep->hwtid); 1290 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, 1291 ep->hwtid)); 1292 rpl->opt0 = cpu_to_be64(opt0); 1293 rpl->opt2 = cpu_to_be32(opt2); 1294 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 1295 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1296 1297 return; 1298 } 1299 1300 static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip, 1301 struct sk_buff *skb) 1302 { 1303 PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid, 1304 peer_ip); 1305 BUG_ON(skb_cloned(skb)); 1306 skb_trim(skb, sizeof(struct cpl_tid_release)); 1307 skb_get(skb); 1308 release_tid(&dev->rdev, hwtid, skb); 1309 return; 1310 } 1311 1312 static void get_4tuple(struct cpl_pass_accept_req *req, 1313 __be32 *local_ip, __be32 *peer_ip, 1314 __be16 *local_port, __be16 *peer_port) 1315 { 1316 int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len)); 1317 int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len)); 1318 struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len); 1319 struct tcphdr *tcp = (struct tcphdr *) 1320 ((u8 *)(req + 1) + eth_len + ip_len); 1321 1322 PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__, 1323 ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source), 1324 ntohs(tcp->dest)); 1325 1326 *peer_ip = ip->saddr; 1327 *local_ip = ip->daddr; 1328 *peer_port = tcp->source; 1329 *local_port = tcp->dest; 1330 1331 return; 1332 } 1333 1334 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) 1335 { 1336 struct c4iw_ep *child_ep, *parent_ep; 1337 struct cpl_pass_accept_req *req = cplhdr(skb); 1338 unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid)); 1339 struct tid_info *t = dev->rdev.lldi.tids; 1340 unsigned int hwtid = GET_TID(req); 1341 struct dst_entry *dst; 1342 struct l2t_entry *l2t; 1343 struct rtable *rt; 1344 __be32 local_ip, peer_ip; 1345 __be16 local_port, peer_port; 1346 struct net_device *pdev; 1347 u32 tx_chan, smac_idx; 1348 u16 rss_qid; 1349 u32 mtu; 1350 int step; 1351 int txq_idx, ctrlq_idx; 1352 1353 parent_ep = lookup_stid(t, stid); 1354 PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid); 1355 
1356 get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port); 1357 1358 if (state_read(&parent_ep->com) != LISTEN) { 1359 printk(KERN_ERR "%s - listening ep not in LISTEN\n", 1360 __func__); 1361 goto reject; 1362 } 1363 1364 /* Find output route */ 1365 rt = find_route(dev, local_ip, peer_ip, local_port, peer_port, 1366 GET_POPEN_TOS(ntohl(req->tos_stid))); 1367 if (!rt) { 1368 printk(KERN_ERR MOD "%s - failed to find dst entry!\n", 1369 __func__); 1370 goto reject; 1371 } 1372 dst = &rt->dst; 1373 if (dst->neighbour->dev->flags & IFF_LOOPBACK) { 1374 pdev = ip_dev_find(&init_net, peer_ip); 1375 BUG_ON(!pdev); 1376 l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour, 1377 pdev, 0); 1378 mtu = pdev->mtu; 1379 tx_chan = cxgb4_port_chan(pdev); 1380 smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; 1381 step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; 1382 txq_idx = cxgb4_port_idx(pdev) * step; 1383 ctrlq_idx = cxgb4_port_idx(pdev); 1384 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; 1385 rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step]; 1386 dev_put(pdev); 1387 } else { 1388 l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour, 1389 dst->neighbour->dev, 0); 1390 mtu = dst_mtu(dst); 1391 tx_chan = cxgb4_port_chan(dst->neighbour->dev); 1392 smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1; 1393 step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; 1394 txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step; 1395 ctrlq_idx = cxgb4_port_idx(dst->neighbour->dev); 1396 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; 1397 rss_qid = dev->rdev.lldi.rxq_ids[ 1398 cxgb4_port_idx(dst->neighbour->dev) * step]; 1399 } 1400 if (!l2t) { 1401 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", 1402 __func__); 1403 dst_release(dst); 1404 goto reject; 1405 } 1406 1407 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); 1408 if (!child_ep) { 1409 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", 1410 __func__); 1411 cxgb4_l2t_release(l2t); 1412 dst_release(dst); 1413 goto reject; 1414 } 1415 state_set(&child_ep->com, CONNECTING); 1416 child_ep->com.dev = dev; 1417 child_ep->com.cm_id = NULL; 1418 child_ep->com.local_addr.sin_family = PF_INET; 1419 child_ep->com.local_addr.sin_port = local_port; 1420 child_ep->com.local_addr.sin_addr.s_addr = local_ip; 1421 child_ep->com.remote_addr.sin_family = PF_INET; 1422 child_ep->com.remote_addr.sin_port = peer_port; 1423 child_ep->com.remote_addr.sin_addr.s_addr = peer_ip; 1424 c4iw_get_ep(&parent_ep->com); 1425 child_ep->parent_ep = parent_ep; 1426 child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid)); 1427 child_ep->l2t = l2t; 1428 child_ep->dst = dst; 1429 child_ep->hwtid = hwtid; 1430 child_ep->tx_chan = tx_chan; 1431 child_ep->smac_idx = smac_idx; 1432 child_ep->rss_qid = rss_qid; 1433 child_ep->mtu = mtu; 1434 child_ep->txq_idx = txq_idx; 1435 child_ep->ctrlq_idx = ctrlq_idx; 1436 1437 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__, 1438 tx_chan, smac_idx, rss_qid); 1439 1440 init_timer(&child_ep->timer); 1441 cxgb4_insert_tid(t, child_ep, hwtid); 1442 accept_cr(child_ep, peer_ip, skb, req); 1443 goto out; 1444 reject: 1445 reject_cr(dev, hwtid, peer_ip, skb); 1446 out: 1447 return 0; 1448 } 1449 1450 static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb) 1451 { 1452 struct c4iw_ep *ep; 1453 struct cpl_pass_establish *req = cplhdr(skb); 1454 struct tid_info *t = dev->rdev.lldi.tids; 1455 unsigned int tid = GET_TID(req); 1456 1457 ep = lookup_tid(t, tid); 1458 PDBG("%s ep %p tid %u\n", 
__func__, ep, ep->hwtid); 1459 ep->snd_seq = be32_to_cpu(req->snd_isn); 1460 ep->rcv_seq = be32_to_cpu(req->rcv_isn); 1461 1462 set_emss(ep, ntohs(req->tcp_opt)); 1463 1464 dst_confirm(ep->dst); 1465 state_set(&ep->com, MPA_REQ_WAIT); 1466 start_ep_timer(ep); 1467 send_flowc(ep, skb); 1468 1469 return 0; 1470 } 1471 1472 static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) 1473 { 1474 struct cpl_peer_close *hdr = cplhdr(skb); 1475 struct c4iw_ep *ep; 1476 struct c4iw_qp_attributes attrs; 1477 int disconnect = 1; 1478 int release = 0; 1479 int closing = 0; 1480 struct tid_info *t = dev->rdev.lldi.tids; 1481 unsigned int tid = GET_TID(hdr); 1482 1483 ep = lookup_tid(t, tid); 1484 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1485 dst_confirm(ep->dst); 1486 1487 mutex_lock(&ep->com.mutex); 1488 switch (ep->com.state) { 1489 case MPA_REQ_WAIT: 1490 __state_set(&ep->com, CLOSING); 1491 break; 1492 case MPA_REQ_SENT: 1493 __state_set(&ep->com, CLOSING); 1494 connect_reply_upcall(ep, -ECONNRESET); 1495 break; 1496 case MPA_REQ_RCVD: 1497 1498 /* 1499 * We're gonna mark this puppy DEAD, but keep 1500 * the reference on it until the ULP accepts or 1501 * rejects the CR. Also wake up anyone waiting 1502 * in rdma connection migration (see c4iw_accept_cr()). 1503 */ 1504 __state_set(&ep->com, CLOSING); 1505 ep->com.wr_wait.done = 1; 1506 ep->com.wr_wait.ret = -ECONNRESET; 1507 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 1508 wake_up(&ep->com.wr_wait.wait); 1509 break; 1510 case MPA_REP_SENT: 1511 __state_set(&ep->com, CLOSING); 1512 ep->com.wr_wait.done = 1; 1513 ep->com.wr_wait.ret = -ECONNRESET; 1514 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 1515 wake_up(&ep->com.wr_wait.wait); 1516 break; 1517 case FPDU_MODE: 1518 start_ep_timer(ep); 1519 __state_set(&ep->com, CLOSING); 1520 closing = 1; 1521 peer_close_upcall(ep); 1522 break; 1523 case ABORTING: 1524 disconnect = 0; 1525 break; 1526 case CLOSING: 1527 __state_set(&ep->com, MORIBUND); 1528 disconnect = 0; 1529 break; 1530 case MORIBUND: 1531 stop_ep_timer(ep); 1532 if (ep->com.cm_id && ep->com.qp) { 1533 attrs.next_state = C4IW_QP_STATE_IDLE; 1534 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1535 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1536 } 1537 close_complete_upcall(ep); 1538 __state_set(&ep->com, DEAD); 1539 release = 1; 1540 disconnect = 0; 1541 break; 1542 case DEAD: 1543 disconnect = 0; 1544 break; 1545 default: 1546 BUG_ON(1); 1547 } 1548 mutex_unlock(&ep->com.mutex); 1549 if (closing) { 1550 attrs.next_state = C4IW_QP_STATE_CLOSING; 1551 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1552 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1553 } 1554 if (disconnect) 1555 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 1556 if (release) 1557 release_ep_resources(ep); 1558 return 0; 1559 } 1560 1561 /* 1562 * Returns whether an ABORT_REQ_RSS message is a negative advice. 
1563 */ 1564 static int is_neg_adv_abort(unsigned int status) 1565 { 1566 return status == CPL_ERR_RTX_NEG_ADVICE || 1567 status == CPL_ERR_PERSIST_NEG_ADVICE; 1568 } 1569 1570 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) 1571 { 1572 struct cpl_abort_req_rss *req = cplhdr(skb); 1573 struct c4iw_ep *ep; 1574 struct cpl_abort_rpl *rpl; 1575 struct sk_buff *rpl_skb; 1576 struct c4iw_qp_attributes attrs; 1577 int ret; 1578 int release = 0; 1579 struct tid_info *t = dev->rdev.lldi.tids; 1580 unsigned int tid = GET_TID(req); 1581 1582 ep = lookup_tid(t, tid); 1583 if (is_neg_adv_abort(req->status)) { 1584 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, 1585 ep->hwtid); 1586 return 0; 1587 } 1588 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, 1589 ep->com.state); 1590 1591 /* 1592 * Wake up any threads in rdma_init() or rdma_fini(). 1593 */ 1594 ep->com.wr_wait.done = 1; 1595 ep->com.wr_wait.ret = -ECONNRESET; 1596 wake_up(&ep->com.wr_wait.wait); 1597 1598 mutex_lock(&ep->com.mutex); 1599 switch (ep->com.state) { 1600 case CONNECTING: 1601 break; 1602 case MPA_REQ_WAIT: 1603 stop_ep_timer(ep); 1604 break; 1605 case MPA_REQ_SENT: 1606 stop_ep_timer(ep); 1607 connect_reply_upcall(ep, -ECONNRESET); 1608 break; 1609 case MPA_REP_SENT: 1610 break; 1611 case MPA_REQ_RCVD: 1612 break; 1613 case MORIBUND: 1614 case CLOSING: 1615 stop_ep_timer(ep); 1616 /*FALLTHROUGH*/ 1617 case FPDU_MODE: 1618 if (ep->com.cm_id && ep->com.qp) { 1619 attrs.next_state = C4IW_QP_STATE_ERROR; 1620 ret = c4iw_modify_qp(ep->com.qp->rhp, 1621 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 1622 &attrs, 1); 1623 if (ret) 1624 printk(KERN_ERR MOD 1625 "%s - qp <- error failed!\n", 1626 __func__); 1627 } 1628 peer_abort_upcall(ep); 1629 break; 1630 case ABORTING: 1631 break; 1632 case DEAD: 1633 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); 1634 mutex_unlock(&ep->com.mutex); 1635 return 0; 1636 default: 1637 BUG_ON(1); 1638 break; 1639 } 1640 dst_confirm(ep->dst); 1641 if (ep->com.state != ABORTING) { 1642 __state_set(&ep->com, DEAD); 1643 release = 1; 1644 } 1645 mutex_unlock(&ep->com.mutex); 1646 1647 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); 1648 if (!rpl_skb) { 1649 printk(KERN_ERR MOD "%s - cannot allocate skb!\n", 1650 __func__); 1651 release = 1; 1652 goto out; 1653 } 1654 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 1655 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); 1656 INIT_TP_WR(rpl, ep->hwtid); 1657 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); 1658 rpl->cmd = CPL_ABORT_NO_RST; 1659 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); 1660 out: 1661 if (release) 1662 release_ep_resources(ep); 1663 return 0; 1664 } 1665 1666 static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1667 { 1668 struct c4iw_ep *ep; 1669 struct c4iw_qp_attributes attrs; 1670 struct cpl_close_con_rpl *rpl = cplhdr(skb); 1671 int release = 0; 1672 struct tid_info *t = dev->rdev.lldi.tids; 1673 unsigned int tid = GET_TID(rpl); 1674 1675 ep = lookup_tid(t, tid); 1676 1677 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1678 BUG_ON(!ep); 1679 1680 /* The cm_id may be null if we failed to connect */ 1681 mutex_lock(&ep->com.mutex); 1682 switch (ep->com.state) { 1683 case CLOSING: 1684 __state_set(&ep->com, MORIBUND); 1685 break; 1686 case MORIBUND: 1687 stop_ep_timer(ep); 1688 if ((ep->com.cm_id) && (ep->com.qp)) { 1689 attrs.next_state = C4IW_QP_STATE_IDLE; 1690 c4iw_modify_qp(ep->com.qp->rhp, 1691 ep->com.qp, 1692 C4IW_QP_ATTR_NEXT_STATE, 1693 
&attrs, 1); 1694 } 1695 close_complete_upcall(ep); 1696 __state_set(&ep->com, DEAD); 1697 release = 1; 1698 break; 1699 case ABORTING: 1700 case DEAD: 1701 break; 1702 default: 1703 BUG_ON(1); 1704 break; 1705 } 1706 mutex_unlock(&ep->com.mutex); 1707 if (release) 1708 release_ep_resources(ep); 1709 return 0; 1710 } 1711 1712 static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) 1713 { 1714 struct cpl_rdma_terminate *rpl = cplhdr(skb); 1715 struct tid_info *t = dev->rdev.lldi.tids; 1716 unsigned int tid = GET_TID(rpl); 1717 struct c4iw_ep *ep; 1718 struct c4iw_qp_attributes attrs; 1719 1720 ep = lookup_tid(t, tid); 1721 BUG_ON(!ep); 1722 1723 if (ep->com.qp) { 1724 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, 1725 ep->com.qp->wq.sq.qid); 1726 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1727 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1728 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1729 } else 1730 printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid); 1731 1732 return 0; 1733 } 1734 1735 /* 1736 * Upcall from the adapter indicating data has been transmitted. 1737 * For us its just the single MPA request or reply. We can now free 1738 * the skb holding the mpa message. 1739 */ 1740 static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) 1741 { 1742 struct c4iw_ep *ep; 1743 struct cpl_fw4_ack *hdr = cplhdr(skb); 1744 u8 credits = hdr->credits; 1745 unsigned int tid = GET_TID(hdr); 1746 struct tid_info *t = dev->rdev.lldi.tids; 1747 1748 1749 ep = lookup_tid(t, tid); 1750 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); 1751 if (credits == 0) { 1752 PDBG("%s 0 credit ack ep %p tid %u state %u\n", 1753 __func__, ep, ep->hwtid, state_read(&ep->com)); 1754 return 0; 1755 } 1756 1757 dst_confirm(ep->dst); 1758 if (ep->mpa_skb) { 1759 PDBG("%s last streaming msg ack ep %p tid %u state %u " 1760 "initiator %u freeing skb\n", __func__, ep, ep->hwtid, 1761 state_read(&ep->com), ep->mpa_attr.initiator ? 
1 : 0); 1762 kfree_skb(ep->mpa_skb); 1763 ep->mpa_skb = NULL; 1764 } 1765 return 0; 1766 } 1767 1768 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 1769 { 1770 int err; 1771 struct c4iw_ep *ep = to_ep(cm_id); 1772 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1773 1774 if (state_read(&ep->com) == DEAD) { 1775 c4iw_put_ep(&ep->com); 1776 return -ECONNRESET; 1777 } 1778 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 1779 if (mpa_rev == 0) 1780 abort_connection(ep, NULL, GFP_KERNEL); 1781 else { 1782 err = send_mpa_reject(ep, pdata, pdata_len); 1783 err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 1784 } 1785 c4iw_put_ep(&ep->com); 1786 return 0; 1787 } 1788 1789 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 1790 { 1791 int err; 1792 struct c4iw_qp_attributes attrs; 1793 enum c4iw_qp_attr_mask mask; 1794 struct c4iw_ep *ep = to_ep(cm_id); 1795 struct c4iw_dev *h = to_c4iw_dev(cm_id->device); 1796 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); 1797 1798 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1799 if (state_read(&ep->com) == DEAD) { 1800 err = -ECONNRESET; 1801 goto err; 1802 } 1803 1804 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 1805 BUG_ON(!qp); 1806 1807 if ((conn_param->ord > c4iw_max_read_depth) || 1808 (conn_param->ird > c4iw_max_read_depth)) { 1809 abort_connection(ep, NULL, GFP_KERNEL); 1810 err = -EINVAL; 1811 goto err; 1812 } 1813 1814 cm_id->add_ref(cm_id); 1815 ep->com.cm_id = cm_id; 1816 ep->com.qp = qp; 1817 1818 ep->ird = conn_param->ird; 1819 ep->ord = conn_param->ord; 1820 1821 if (peer2peer && ep->ird == 0) 1822 ep->ird = 1; 1823 1824 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); 1825 1826 /* bind QP to EP and move to RTS */ 1827 attrs.mpa_attr = ep->mpa_attr; 1828 attrs.max_ird = ep->ird; 1829 attrs.max_ord = ep->ord; 1830 attrs.llp_stream_handle = ep; 1831 attrs.next_state = C4IW_QP_STATE_RTS; 1832 1833 /* bind QP and TID with INIT_WR */ 1834 mask = C4IW_QP_ATTR_NEXT_STATE | 1835 C4IW_QP_ATTR_LLP_STREAM_HANDLE | 1836 C4IW_QP_ATTR_MPA_ATTR | 1837 C4IW_QP_ATTR_MAX_IRD | 1838 C4IW_QP_ATTR_MAX_ORD; 1839 1840 err = c4iw_modify_qp(ep->com.qp->rhp, 1841 ep->com.qp, mask, &attrs, 1); 1842 if (err) 1843 goto err1; 1844 err = send_mpa_reply(ep, conn_param->private_data, 1845 conn_param->private_data_len); 1846 if (err) 1847 goto err1; 1848 1849 state_set(&ep->com, FPDU_MODE); 1850 established_upcall(ep); 1851 c4iw_put_ep(&ep->com); 1852 return 0; 1853 err1: 1854 ep->com.cm_id = NULL; 1855 ep->com.qp = NULL; 1856 cm_id->rem_ref(cm_id); 1857 err: 1858 c4iw_put_ep(&ep->com); 1859 return err; 1860 } 1861 1862 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 1863 { 1864 int err = 0; 1865 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 1866 struct c4iw_ep *ep; 1867 struct rtable *rt; 1868 struct net_device *pdev; 1869 int step; 1870 1871 if ((conn_param->ord > c4iw_max_read_depth) || 1872 (conn_param->ird > c4iw_max_read_depth)) { 1873 err = -EINVAL; 1874 goto out; 1875 } 1876 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 1877 if (!ep) { 1878 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 1879 err = -ENOMEM; 1880 goto out; 1881 } 1882 init_timer(&ep->timer); 1883 ep->plen = conn_param->private_data_len; 1884 if (ep->plen) 1885 memcpy(ep->mpa_pkt + sizeof(struct mpa_message), 1886 conn_param->private_data, ep->plen); 1887 ep->ird = conn_param->ird; 1888 ep->ord = conn_param->ord; 1889 1890 if (peer2peer && ep->ord == 0) 1891 ep->ord = 1; 1892 1893 
cm_id->add_ref(cm_id); 1894 ep->com.dev = dev; 1895 ep->com.cm_id = cm_id; 1896 ep->com.qp = get_qhp(dev, conn_param->qpn); 1897 BUG_ON(!ep->com.qp); 1898 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, 1899 ep->com.qp, cm_id); 1900 1901 /* 1902 * Allocate an active TID to initiate a TCP connection. 1903 */ 1904 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); 1905 if (ep->atid == -1) { 1906 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); 1907 err = -ENOMEM; 1908 goto fail2; 1909 } 1910 1911 PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__, 1912 ntohl(cm_id->local_addr.sin_addr.s_addr), 1913 ntohs(cm_id->local_addr.sin_port), 1914 ntohl(cm_id->remote_addr.sin_addr.s_addr), 1915 ntohs(cm_id->remote_addr.sin_port)); 1916 1917 /* find a route */ 1918 rt = find_route(dev, 1919 cm_id->local_addr.sin_addr.s_addr, 1920 cm_id->remote_addr.sin_addr.s_addr, 1921 cm_id->local_addr.sin_port, 1922 cm_id->remote_addr.sin_port, 0); 1923 if (!rt) { 1924 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); 1925 err = -EHOSTUNREACH; 1926 goto fail3; 1927 } 1928 ep->dst = &rt->dst; 1929 1930 /* get a l2t entry */ 1931 if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) { 1932 PDBG("%s LOOPBACK\n", __func__); 1933 pdev = ip_dev_find(&init_net, 1934 cm_id->remote_addr.sin_addr.s_addr); 1935 ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, 1936 ep->dst->neighbour, 1937 pdev, 0); 1938 ep->mtu = pdev->mtu; 1939 ep->tx_chan = cxgb4_port_chan(pdev); 1940 ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; 1941 step = ep->com.dev->rdev.lldi.ntxq / 1942 ep->com.dev->rdev.lldi.nchan; 1943 ep->txq_idx = cxgb4_port_idx(pdev) * step; 1944 step = ep->com.dev->rdev.lldi.nrxq / 1945 ep->com.dev->rdev.lldi.nchan; 1946 ep->ctrlq_idx = cxgb4_port_idx(pdev); 1947 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ 1948 cxgb4_port_idx(pdev) * step]; 1949 dev_put(pdev); 1950 } else { 1951 ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, 1952 ep->dst->neighbour, 1953 ep->dst->neighbour->dev, 0); 1954 ep->mtu = dst_mtu(ep->dst); 1955 ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev); 1956 ep->smac_idx = (cxgb4_port_viid(ep->dst->neighbour->dev) & 1957 0x7F) << 1; 1958 step = ep->com.dev->rdev.lldi.ntxq / 1959 ep->com.dev->rdev.lldi.nchan; 1960 ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step; 1961 ep->ctrlq_idx = cxgb4_port_idx(ep->dst->neighbour->dev); 1962 step = ep->com.dev->rdev.lldi.nrxq / 1963 ep->com.dev->rdev.lldi.nchan; 1964 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ 1965 cxgb4_port_idx(ep->dst->neighbour->dev) * step]; 1966 } 1967 if (!ep->l2t) { 1968 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 1969 err = -ENOMEM; 1970 goto fail4; 1971 } 1972 1973 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 1974 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 1975 ep->l2t->idx); 1976 1977 state_set(&ep->com, CONNECTING); 1978 ep->tos = 0; 1979 ep->com.local_addr = cm_id->local_addr; 1980 ep->com.remote_addr = cm_id->remote_addr; 1981 1982 /* send connect request to rnic */ 1983 err = send_connect(ep); 1984 if (!err) 1985 goto out; 1986 1987 cxgb4_l2t_release(ep->l2t); 1988 fail4: 1989 dst_release(ep->dst); 1990 fail3: 1991 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 1992 fail2: 1993 cm_id->rem_ref(cm_id); 1994 c4iw_put_ep(&ep->com); 1995 out: 1996 return err; 1997 } 1998 1999 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) 2000 { 2001 int err = 0; 2002 struct c4iw_dev *dev = 
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.dev = dev;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	c4iw_init_wr_wait(&ep->com.wr_wait);
	err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
				  ep->com.local_addr.sin_addr.s_addr,
				  ep->com.local_addr.sin_port,
				  ep->com.dev->rdev.lldi.rxq_ids[0]);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
				  __func__);
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
fail1:
out:
	return err;
}

int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	c4iw_init_wr_wait(&ep->com.wr_wait);
	err = listen_stop(ep);
	if (err)
		goto done;
	err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
				  __func__);
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
done:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
	return err;
}

int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;

	mutex_lock(&ep->com.mutex);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	rdev = &ep->com.dev->rdev;
	if (c4iw_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	mutex_unlock(&ep->com.mutex);
	if (close) {
		if (abrupt)
			ret = abort_connection(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
		if (ret)
			fatal = 1;
	}
	if (fatal)
		release_ep_resources(ep);
	return ret;
}

static int async_event(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
	return 0;
}

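/*
 * CPL dispatch is two level: most upcalls from the T4 core land in
 * c4iw_handlers[] (at the bottom of this file) and are simply deferred to
 * sched(), which queues the skb for process_work() on the "iw_cxgb4"
 * workqueue; process_work() then re-dispatches through the table below.
 */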
/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack,
	[CPL_FW6_MSG] = async_event
};

static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	mutex_lock(&ep->com.mutex);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		break;
	default:
		printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
		       __func__, ep, ep->hwtid, ep->com.state);
		WARN_ON(1);
		abort = 0;
	}
	mutex_unlock(&ep->com.mutex);
	if (abort)
		abort_connection(ep, NULL, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
}

static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}

static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
	}
	process_timedout_eps();
}

static DECLARE_WORK(skb_work, process_work);

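/*
 * The endpoint timer fires in timer (softirq) context, so ep_timeout() only
 * parks the endpoint on timeout_list and kicks the workqueue; the actual
 * abort happens in process_timeout(), called via process_timedout_eps()
 * from process_work().
 */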
static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;

	spin_lock(&timeout_lock);
	list_add_tail(&ep->entry, &timeout_list);
	spin_unlock(&timeout_lock);
	queue_work(workq, &skb_work);
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{

	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}

static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	kfree_skb(skb);
	return 0;
}

static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case 1:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp) {
			if (ret)
				wr_waitp->ret = -ret;
			else
				wr_waitp->ret = 0;
			wr_waitp->done = 1;
			wake_up(&wr_waitp->wait);
		}
		kfree_skb(skb);
		break;
	case 2:
		sched(dev, skb);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
		       rpl->type);
		kfree_skb(skb);
		break;
	}
	return 0;
}

/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = sched,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg
};

int __init c4iw_cm_init(void)
{
	spin_lock_init(&timeout_lock);
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb4");
	if (!workq)
		return -ENOMEM;

	return 0;
}

void __exit c4iw_cm_term(void)
{
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(workq);
	destroy_workqueue(workq);
}