1 /* 2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 #include <linux/module.h> 33 #include <linux/list.h> 34 #include <linux/workqueue.h> 35 #include <linux/skbuff.h> 36 #include <linux/timer.h> 37 #include <linux/notifier.h> 38 #include <linux/inetdevice.h> 39 #include <linux/ip.h> 40 #include <linux/tcp.h> 41 42 #include <net/neighbour.h> 43 #include <net/netevent.h> 44 #include <net/route.h> 45 46 #include "iw_cxgb4.h" 47 48 static char *states[] = { 49 "idle", 50 "listen", 51 "connecting", 52 "mpa_wait_req", 53 "mpa_req_sent", 54 "mpa_req_rcvd", 55 "mpa_rep_sent", 56 "fpdu_mode", 57 "aborting", 58 "closing", 59 "moribund", 60 "dead", 61 NULL, 62 }; 63 64 static int dack_mode = 1; 65 module_param(dack_mode, int, 0644); 66 MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)"); 67 68 int c4iw_max_read_depth = 8; 69 module_param(c4iw_max_read_depth, int, 0644); 70 MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)"); 71 72 static int enable_tcp_timestamps; 73 module_param(enable_tcp_timestamps, int, 0644); 74 MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)"); 75 76 static int enable_tcp_sack; 77 module_param(enable_tcp_sack, int, 0644); 78 MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)"); 79 80 static int enable_tcp_window_scaling = 1; 81 module_param(enable_tcp_window_scaling, int, 0644); 82 MODULE_PARM_DESC(enable_tcp_window_scaling, 83 "Enable tcp window scaling (default=1)"); 84 85 int c4iw_debug; 86 module_param(c4iw_debug, int, 0644); 87 MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)"); 88 89 static int peer2peer; 90 module_param(peer2peer, int, 0644); 91 MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)"); 92 93 static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; 94 module_param(p2p_type, int, 0644); 95 MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: " 96 "1=RDMA_READ 0=RDMA_WRITE (default 1)"); 97 98 static int ep_timeout_secs = 60; 99 module_param(ep_timeout_secs, int, 0644); 100 MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout " 101 "in seconds (default=60)"); 102 103 static int mpa_rev = 1; 104 module_param(mpa_rev, int, 0644); 105 MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, " 106 "1 is spec compliant. (default=1)"); 107 108 static int markers_enabled; 109 module_param(markers_enabled, int, 0644); 110 MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)"); 111 112 static int crc_enabled = 1; 113 module_param(crc_enabled, int, 0644); 114 MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)"); 115 116 static int rcv_win = 256 * 1024; 117 module_param(rcv_win, int, 0644); 118 MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)"); 119 120 static int snd_win = 128 * 1024; 121 module_param(snd_win, int, 0644); 122 MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)"); 123 124 static struct workqueue_struct *workq; 125 126 static struct sk_buff_head rxq; 127 128 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp); 129 static void ep_timeout(unsigned long arg); 130 static void connect_reply_upcall(struct c4iw_ep *ep, int status); 131 132 static LIST_HEAD(timeout_list); 133 static spinlock_t timeout_lock; 134 135 static void start_ep_timer(struct c4iw_ep *ep) 136 { 137 PDBG("%s ep %p\n", __func__, ep); 138 if (timer_pending(&ep->timer)) { 139 PDBG("%s stopped / restarted timer ep %p\n", __func__, ep); 140 del_timer_sync(&ep->timer); 141 } else 142 c4iw_get_ep(&ep->com); 143 ep->timer.expires = jiffies + ep_timeout_secs * HZ; 144 ep->timer.data = (unsigned long)ep; 145 ep->timer.function = ep_timeout; 146 add_timer(&ep->timer); 147 } 148 149 static void stop_ep_timer(struct c4iw_ep *ep) 150 { 151 PDBG("%s ep %p\n", __func__, ep); 152 if (!timer_pending(&ep->timer)) { 153 printk(KERN_ERR "%s timer stopped when its not running! " 154 "ep %p state %u\n", __func__, ep, ep->com.state); 155 WARN_ON(1); 156 return; 157 } 158 del_timer_sync(&ep->timer); 159 c4iw_put_ep(&ep->com); 160 } 161 162 static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb, 163 struct l2t_entry *l2e) 164 { 165 int error = 0; 166 167 if (c4iw_fatal_error(rdev)) { 168 kfree_skb(skb); 169 PDBG("%s - device in error state - dropping\n", __func__); 170 return -EIO; 171 } 172 error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e); 173 if (error < 0) 174 kfree_skb(skb); 175 return error < 0 ? error : 0; 176 } 177 178 int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb) 179 { 180 int error = 0; 181 182 if (c4iw_fatal_error(rdev)) { 183 kfree_skb(skb); 184 PDBG("%s - device in error state - dropping\n", __func__); 185 return -EIO; 186 } 187 error = cxgb4_ofld_send(rdev->lldi.ports[0], skb); 188 if (error < 0) 189 kfree_skb(skb); 190 return error < 0 ? error : 0; 191 } 192 193 static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb) 194 { 195 struct cpl_tid_release *req; 196 197 skb = get_skb(skb, sizeof *req, GFP_KERNEL); 198 if (!skb) 199 return; 200 req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req)); 201 INIT_TP_WR(req, hwtid); 202 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); 203 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); 204 c4iw_ofld_send(rdev, skb); 205 return; 206 } 207 208 static void set_emss(struct c4iw_ep *ep, u16 opt) 209 { 210 ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40; 211 ep->mss = ep->emss; 212 if (GET_TCPOPT_TSTAMP(opt)) 213 ep->emss -= 12; 214 if (ep->emss < 128) 215 ep->emss = 128; 216 PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt), 217 ep->mss, ep->emss); 218 } 219 220 static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc) 221 { 222 enum c4iw_ep_state state; 223 224 mutex_lock(&epc->mutex); 225 state = epc->state; 226 mutex_unlock(&epc->mutex); 227 return state; 228 } 229 230 static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) 231 { 232 epc->state = new; 233 } 234 235 static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) 236 { 237 mutex_lock(&epc->mutex); 238 PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]); 239 __state_set(epc, new); 240 mutex_unlock(&epc->mutex); 241 return; 242 } 243 244 static void *alloc_ep(int size, gfp_t gfp) 245 { 246 struct c4iw_ep_common *epc; 247 248 epc = kzalloc(size, gfp); 249 if (epc) { 250 kref_init(&epc->kref); 251 mutex_init(&epc->mutex); 252 c4iw_init_wr_wait(&epc->wr_wait); 253 } 254 PDBG("%s alloc ep %p\n", __func__, epc); 255 return epc; 256 } 257 258 void _c4iw_free_ep(struct kref *kref) 259 { 260 struct c4iw_ep *ep; 261 262 ep = container_of(kref, struct c4iw_ep, com.kref); 263 PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]); 264 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { 265 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); 266 dst_release(ep->dst); 267 cxgb4_l2t_release(ep->l2t); 268 } 269 kfree(ep); 270 } 271 272 static void release_ep_resources(struct c4iw_ep *ep) 273 { 274 set_bit(RELEASE_RESOURCES, &ep->com.flags); 275 c4iw_put_ep(&ep->com); 276 } 277 278 static int status2errno(int status) 279 { 280 switch (status) { 281 case CPL_ERR_NONE: 282 return 0; 283 case CPL_ERR_CONN_RESET: 284 return -ECONNRESET; 285 case CPL_ERR_ARP_MISS: 286 return -EHOSTUNREACH; 287 case CPL_ERR_CONN_TIMEDOUT: 288 return -ETIMEDOUT; 289 case CPL_ERR_TCAM_FULL: 290 return -ENOMEM; 291 case CPL_ERR_CONN_EXIST: 292 return -EADDRINUSE; 293 default: 294 return -EIO; 295 } 296 } 297 298 /* 299 * Try and reuse skbs already allocated... 300 */ 301 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp) 302 { 303 if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) { 304 skb_trim(skb, 0); 305 skb_get(skb); 306 skb_reset_transport_header(skb); 307 } else { 308 skb = alloc_skb(len, gfp); 309 } 310 return skb; 311 } 312 313 static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip, 314 __be32 peer_ip, __be16 local_port, 315 __be16 peer_port, u8 tos) 316 { 317 struct rtable *rt; 318 319 rt = ip_route_output_ports(&init_net, NULL, peer_ip, local_ip, 320 peer_port, local_port, IPPROTO_TCP, 321 tos, 0); 322 if (IS_ERR(rt)) 323 return NULL; 324 return rt; 325 } 326 327 static void arp_failure_discard(void *handle, struct sk_buff *skb) 328 { 329 PDBG("%s c4iw_dev %p\n", __func__, handle); 330 kfree_skb(skb); 331 } 332 333 /* 334 * Handle an ARP failure for an active open. 335 */ 336 static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) 337 { 338 printk(KERN_ERR MOD "ARP failure duing connect\n"); 339 kfree_skb(skb); 340 } 341 342 /* 343 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant 344 * and send it along. 345 */ 346 static void abort_arp_failure(void *handle, struct sk_buff *skb) 347 { 348 struct c4iw_rdev *rdev = handle; 349 struct cpl_abort_req *req = cplhdr(skb); 350 351 PDBG("%s rdev %p\n", __func__, rdev); 352 req->cmd = CPL_ABORT_NO_RST; 353 c4iw_ofld_send(rdev, skb); 354 } 355 356 static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) 357 { 358 unsigned int flowclen = 80; 359 struct fw_flowc_wr *flowc; 360 int i; 361 362 skb = get_skb(skb, flowclen, GFP_KERNEL); 363 flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen); 364 365 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) | 366 FW_FLOWC_WR_NPARAMS(8)); 367 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen, 368 16)) | FW_WR_FLOWID(ep->hwtid)); 369 370 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 371 flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8); 372 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 373 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); 374 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 375 flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan); 376 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 377 flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid); 378 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; 379 flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq); 380 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; 381 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq); 382 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; 383 flowc->mnemval[6].val = cpu_to_be32(snd_win); 384 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; 385 flowc->mnemval[7].val = cpu_to_be32(ep->emss); 386 /* Pad WR to 16 byte boundary */ 387 flowc->mnemval[8].mnemonic = 0; 388 flowc->mnemval[8].val = 0; 389 for (i = 0; i < 9; i++) { 390 flowc->mnemval[i].r4[0] = 0; 391 flowc->mnemval[i].r4[1] = 0; 392 flowc->mnemval[i].r4[2] = 0; 393 } 394 395 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 396 c4iw_ofld_send(&ep->com.dev->rdev, skb); 397 } 398 399 static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp) 400 { 401 struct cpl_close_con_req *req; 402 struct sk_buff *skb; 403 int wrlen = roundup(sizeof *req, 16); 404 405 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 406 skb = get_skb(NULL, wrlen, gfp); 407 if (!skb) { 408 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); 409 return -ENOMEM; 410 } 411 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 412 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 413 req = (struct cpl_close_con_req *) skb_put(skb, wrlen); 414 memset(req, 0, wrlen); 415 INIT_TP_WR(req, ep->hwtid); 416 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, 417 ep->hwtid)); 418 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 419 } 420 421 static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) 422 { 423 struct cpl_abort_req *req; 424 int wrlen = roundup(sizeof *req, 16); 425 426 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 427 skb = get_skb(skb, wrlen, gfp); 428 if (!skb) { 429 printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 430 __func__); 431 return -ENOMEM; 432 } 433 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 434 t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure); 435 req = (struct cpl_abort_req *) skb_put(skb, wrlen); 436 memset(req, 0, wrlen); 437 INIT_TP_WR(req, ep->hwtid); 438 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); 439 req->cmd = CPL_ABORT_SEND_RST; 440 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 441 } 442 443 static int send_connect(struct c4iw_ep *ep) 444 { 445 struct cpl_act_open_req *req; 446 struct sk_buff *skb; 447 u64 opt0; 448 u32 opt2; 449 unsigned int mtu_idx; 450 int wscale; 451 int wrlen = roundup(sizeof *req, 16); 452 453 PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid); 454 455 skb = get_skb(NULL, wrlen, GFP_KERNEL); 456 if (!skb) { 457 printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 458 __func__); 459 return -ENOMEM; 460 } 461 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 462 463 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 464 wscale = compute_wscale(rcv_win); 465 opt0 = KEEP_ALIVE(1) | 466 DELACK(1) | 467 WND_SCALE(wscale) | 468 MSS_IDX(mtu_idx) | 469 L2T_IDX(ep->l2t->idx) | 470 TX_CHAN(ep->tx_chan) | 471 SMAC_SEL(ep->smac_idx) | 472 DSCP(ep->tos) | 473 ULP_MODE(ULP_MODE_TCPDDP) | 474 RCV_BUFSIZ(rcv_win>>10); 475 opt2 = RX_CHANNEL(0) | 476 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 477 if (enable_tcp_timestamps) 478 opt2 |= TSTAMPS_EN(1); 479 if (enable_tcp_sack) 480 opt2 |= SACK_EN(1); 481 if (wscale && enable_tcp_window_scaling) 482 opt2 |= WND_SCALE_EN(1); 483 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); 484 485 req = (struct cpl_act_open_req *) skb_put(skb, wrlen); 486 INIT_TP_WR(req, 0); 487 OPCODE_TID(req) = cpu_to_be32( 488 MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid))); 489 req->local_port = ep->com.local_addr.sin_port; 490 req->peer_port = ep->com.remote_addr.sin_port; 491 req->local_ip = ep->com.local_addr.sin_addr.s_addr; 492 req->peer_ip = ep->com.remote_addr.sin_addr.s_addr; 493 req->opt0 = cpu_to_be64(opt0); 494 req->params = 0; 495 req->opt2 = cpu_to_be32(opt2); 496 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 497 } 498 499 static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb) 500 { 501 int mpalen, wrlen; 502 struct fw_ofld_tx_data_wr *req; 503 struct mpa_message *mpa; 504 505 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 506 507 BUG_ON(skb_cloned(skb)); 508 509 mpalen = sizeof(*mpa) + ep->plen; 510 wrlen = roundup(mpalen + sizeof *req, 16); 511 skb = get_skb(skb, wrlen, GFP_KERNEL); 512 if (!skb) { 513 connect_reply_upcall(ep, -ENOMEM); 514 return; 515 } 516 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 517 518 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); 519 memset(req, 0, wrlen); 520 req->op_to_immdlen = cpu_to_be32( 521 FW_WR_OP(FW_OFLD_TX_DATA_WR) | 522 FW_WR_COMPL(1) | 523 FW_WR_IMMDLEN(mpalen)); 524 req->flowid_len16 = cpu_to_be32( 525 FW_WR_FLOWID(ep->hwtid) | 526 FW_WR_LEN16(wrlen >> 4)); 527 req->plen = cpu_to_be32(mpalen); 528 req->tunnel_to_proxy = cpu_to_be32( 529 FW_OFLD_TX_DATA_WR_FLUSH(1) | 530 FW_OFLD_TX_DATA_WR_SHOVE(1)); 531 532 mpa = (struct mpa_message *)(req + 1); 533 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); 534 mpa->flags = (crc_enabled ? MPA_CRC : 0) | 535 (markers_enabled ? MPA_MARKERS : 0); 536 mpa->private_data_size = htons(ep->plen); 537 mpa->revision = mpa_rev; 538 539 if (ep->plen) 540 memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen); 541 542 /* 543 * Reference the mpa skb. This ensures the data area 544 * will remain in memory until the hw acks the tx. 545 * Function fw4_ack() will deref it. 546 */ 547 skb_get(skb); 548 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 549 BUG_ON(ep->mpa_skb); 550 ep->mpa_skb = skb; 551 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 552 start_ep_timer(ep); 553 state_set(&ep->com, MPA_REQ_SENT); 554 ep->mpa_attr.initiator = 1; 555 return; 556 } 557 558 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) 559 { 560 int mpalen, wrlen; 561 struct fw_ofld_tx_data_wr *req; 562 struct mpa_message *mpa; 563 struct sk_buff *skb; 564 565 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 566 567 mpalen = sizeof(*mpa) + plen; 568 wrlen = roundup(mpalen + sizeof *req, 16); 569 570 skb = get_skb(NULL, wrlen, GFP_KERNEL); 571 if (!skb) { 572 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); 573 return -ENOMEM; 574 } 575 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 576 577 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); 578 memset(req, 0, wrlen); 579 req->op_to_immdlen = cpu_to_be32( 580 FW_WR_OP(FW_OFLD_TX_DATA_WR) | 581 FW_WR_COMPL(1) | 582 FW_WR_IMMDLEN(mpalen)); 583 req->flowid_len16 = cpu_to_be32( 584 FW_WR_FLOWID(ep->hwtid) | 585 FW_WR_LEN16(wrlen >> 4)); 586 req->plen = cpu_to_be32(mpalen); 587 req->tunnel_to_proxy = cpu_to_be32( 588 FW_OFLD_TX_DATA_WR_FLUSH(1) | 589 FW_OFLD_TX_DATA_WR_SHOVE(1)); 590 591 mpa = (struct mpa_message *)(req + 1); 592 memset(mpa, 0, sizeof(*mpa)); 593 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 594 mpa->flags = MPA_REJECT; 595 mpa->revision = mpa_rev; 596 mpa->private_data_size = htons(plen); 597 if (plen) 598 memcpy(mpa->private_data, pdata, plen); 599 600 /* 601 * Reference the mpa skb again. This ensures the data area 602 * will remain in memory until the hw acks the tx. 603 * Function fw4_ack() will deref it. 604 */ 605 skb_get(skb); 606 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 607 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 608 BUG_ON(ep->mpa_skb); 609 ep->mpa_skb = skb; 610 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 611 } 612 613 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) 614 { 615 int mpalen, wrlen; 616 struct fw_ofld_tx_data_wr *req; 617 struct mpa_message *mpa; 618 struct sk_buff *skb; 619 620 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 621 622 mpalen = sizeof(*mpa) + plen; 623 wrlen = roundup(mpalen + sizeof *req, 16); 624 625 skb = get_skb(NULL, wrlen, GFP_KERNEL); 626 if (!skb) { 627 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); 628 return -ENOMEM; 629 } 630 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 631 632 req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen); 633 memset(req, 0, wrlen); 634 req->op_to_immdlen = cpu_to_be32( 635 FW_WR_OP(FW_OFLD_TX_DATA_WR) | 636 FW_WR_COMPL(1) | 637 FW_WR_IMMDLEN(mpalen)); 638 req->flowid_len16 = cpu_to_be32( 639 FW_WR_FLOWID(ep->hwtid) | 640 FW_WR_LEN16(wrlen >> 4)); 641 req->plen = cpu_to_be32(mpalen); 642 req->tunnel_to_proxy = cpu_to_be32( 643 FW_OFLD_TX_DATA_WR_FLUSH(1) | 644 FW_OFLD_TX_DATA_WR_SHOVE(1)); 645 646 mpa = (struct mpa_message *)(req + 1); 647 memset(mpa, 0, sizeof(*mpa)); 648 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 649 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | 650 (markers_enabled ? MPA_MARKERS : 0); 651 mpa->revision = mpa_rev; 652 mpa->private_data_size = htons(plen); 653 if (plen) 654 memcpy(mpa->private_data, pdata, plen); 655 656 /* 657 * Reference the mpa skb. This ensures the data area 658 * will remain in memory until the hw acks the tx. 659 * Function fw4_ack() will deref it. 660 */ 661 skb_get(skb); 662 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 663 ep->mpa_skb = skb; 664 state_set(&ep->com, MPA_REP_SENT); 665 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 666 } 667 668 static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) 669 { 670 struct c4iw_ep *ep; 671 struct cpl_act_establish *req = cplhdr(skb); 672 unsigned int tid = GET_TID(req); 673 unsigned int atid = GET_TID_TID(ntohl(req->tos_atid)); 674 struct tid_info *t = dev->rdev.lldi.tids; 675 676 ep = lookup_atid(t, atid); 677 678 PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid, 679 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); 680 681 dst_confirm(ep->dst); 682 683 /* setup the hwtid for this connection */ 684 ep->hwtid = tid; 685 cxgb4_insert_tid(t, ep, tid); 686 687 ep->snd_seq = be32_to_cpu(req->snd_isn); 688 ep->rcv_seq = be32_to_cpu(req->rcv_isn); 689 690 set_emss(ep, ntohs(req->tcp_opt)); 691 692 /* dealloc the atid */ 693 cxgb4_free_atid(t, atid); 694 695 /* start MPA negotiation */ 696 send_flowc(ep, NULL); 697 send_mpa_req(ep, skb); 698 699 return 0; 700 } 701 702 static void close_complete_upcall(struct c4iw_ep *ep) 703 { 704 struct iw_cm_event event; 705 706 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 707 memset(&event, 0, sizeof(event)); 708 event.event = IW_CM_EVENT_CLOSE; 709 if (ep->com.cm_id) { 710 PDBG("close complete delivered ep %p cm_id %p tid %u\n", 711 ep, ep->com.cm_id, ep->hwtid); 712 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 713 ep->com.cm_id->rem_ref(ep->com.cm_id); 714 ep->com.cm_id = NULL; 715 ep->com.qp = NULL; 716 } 717 } 718 719 static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) 720 { 721 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 722 close_complete_upcall(ep); 723 state_set(&ep->com, ABORTING); 724 return send_abort(ep, skb, gfp); 725 } 726 727 static void peer_close_upcall(struct c4iw_ep *ep) 728 { 729 struct iw_cm_event event; 730 731 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 732 memset(&event, 0, sizeof(event)); 733 event.event = IW_CM_EVENT_DISCONNECT; 734 if (ep->com.cm_id) { 735 PDBG("peer close delivered ep %p cm_id %p tid %u\n", 736 ep, ep->com.cm_id, ep->hwtid); 737 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 738 } 739 } 740 741 static void peer_abort_upcall(struct c4iw_ep *ep) 742 { 743 struct iw_cm_event event; 744 745 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 746 memset(&event, 0, sizeof(event)); 747 event.event = IW_CM_EVENT_CLOSE; 748 event.status = -ECONNRESET; 749 if (ep->com.cm_id) { 750 PDBG("abort delivered ep %p cm_id %p tid %u\n", ep, 751 ep->com.cm_id, ep->hwtid); 752 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 753 ep->com.cm_id->rem_ref(ep->com.cm_id); 754 ep->com.cm_id = NULL; 755 ep->com.qp = NULL; 756 } 757 } 758 759 static void connect_reply_upcall(struct c4iw_ep *ep, int status) 760 { 761 struct iw_cm_event event; 762 763 PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status); 764 memset(&event, 0, sizeof(event)); 765 event.event = IW_CM_EVENT_CONNECT_REPLY; 766 event.status = status; 767 event.local_addr = ep->com.local_addr; 768 event.remote_addr = ep->com.remote_addr; 769 770 if ((status == 0) || (status == -ECONNREFUSED)) { 771 event.private_data_len = ep->plen; 772 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); 773 } 774 775 PDBG("%s ep %p tid %u status %d\n", __func__, ep, 776 ep->hwtid, status); 777 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 778 779 if (status < 0) { 780 ep->com.cm_id->rem_ref(ep->com.cm_id); 781 ep->com.cm_id = NULL; 782 ep->com.qp = NULL; 783 } 784 } 785 786 static void connect_request_upcall(struct c4iw_ep *ep) 787 { 788 struct iw_cm_event event; 789 790 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 791 memset(&event, 0, sizeof(event)); 792 event.event = IW_CM_EVENT_CONNECT_REQUEST; 793 event.local_addr = ep->com.local_addr; 794 event.remote_addr = ep->com.remote_addr; 795 event.private_data_len = ep->plen; 796 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); 797 event.provider_data = ep; 798 if (state_read(&ep->parent_ep->com) != DEAD) { 799 c4iw_get_ep(&ep->com); 800 ep->parent_ep->com.cm_id->event_handler( 801 ep->parent_ep->com.cm_id, 802 &event); 803 } 804 c4iw_put_ep(&ep->parent_ep->com); 805 ep->parent_ep = NULL; 806 } 807 808 static void established_upcall(struct c4iw_ep *ep) 809 { 810 struct iw_cm_event event; 811 812 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 813 memset(&event, 0, sizeof(event)); 814 event.event = IW_CM_EVENT_ESTABLISHED; 815 if (ep->com.cm_id) { 816 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 817 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 818 } 819 } 820 821 static int update_rx_credits(struct c4iw_ep *ep, u32 credits) 822 { 823 struct cpl_rx_data_ack *req; 824 struct sk_buff *skb; 825 int wrlen = roundup(sizeof *req, 16); 826 827 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); 828 skb = get_skb(NULL, wrlen, GFP_KERNEL); 829 if (!skb) { 830 printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n"); 831 return 0; 832 } 833 834 req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen); 835 memset(req, 0, wrlen); 836 INIT_TP_WR(req, ep->hwtid); 837 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, 838 ep->hwtid)); 839 req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) | 840 F_RX_DACK_CHANGE | 841 V_RX_DACK_MODE(dack_mode)); 842 set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx); 843 c4iw_ofld_send(&ep->com.dev->rdev, skb); 844 return credits; 845 } 846 847 static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) 848 { 849 struct mpa_message *mpa; 850 u16 plen; 851 struct c4iw_qp_attributes attrs; 852 enum c4iw_qp_attr_mask mask; 853 int err; 854 855 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 856 857 /* 858 * Stop mpa timer. If it expired, then the state has 859 * changed and we bail since ep_timeout already aborted 860 * the connection. 861 */ 862 stop_ep_timer(ep); 863 if (state_read(&ep->com) != MPA_REQ_SENT) 864 return; 865 866 /* 867 * If we get more than the supported amount of private data 868 * then we must fail this connection. 869 */ 870 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { 871 err = -EINVAL; 872 goto err; 873 } 874 875 /* 876 * copy the new data into our accumulation buffer. 877 */ 878 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 879 skb->len); 880 ep->mpa_pkt_len += skb->len; 881 882 /* 883 * if we don't even have the mpa message, then bail. 884 */ 885 if (ep->mpa_pkt_len < sizeof(*mpa)) 886 return; 887 mpa = (struct mpa_message *) ep->mpa_pkt; 888 889 /* Validate MPA header. */ 890 if (mpa->revision != mpa_rev) { 891 err = -EPROTO; 892 goto err; 893 } 894 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { 895 err = -EPROTO; 896 goto err; 897 } 898 899 plen = ntohs(mpa->private_data_size); 900 901 /* 902 * Fail if there's too much private data. 903 */ 904 if (plen > MPA_MAX_PRIVATE_DATA) { 905 err = -EPROTO; 906 goto err; 907 } 908 909 /* 910 * If plen does not account for pkt size 911 */ 912 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 913 err = -EPROTO; 914 goto err; 915 } 916 917 ep->plen = (u8) plen; 918 919 /* 920 * If we don't have all the pdata yet, then bail. 921 * We'll continue process when more data arrives. 922 */ 923 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 924 return; 925 926 if (mpa->flags & MPA_REJECT) { 927 err = -ECONNREFUSED; 928 goto err; 929 } 930 931 /* 932 * If we get here we have accumulated the entire mpa 933 * start reply message including private data. And 934 * the MPA header is valid. 935 */ 936 state_set(&ep->com, FPDU_MODE); 937 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 938 ep->mpa_attr.recv_marker_enabled = markers_enabled; 939 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 940 ep->mpa_attr.version = mpa_rev; 941 ep->mpa_attr.p2p_type = peer2peer ? p2p_type : 942 FW_RI_INIT_P2PTYPE_DISABLED; 943 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " 944 "xmit_marker_enabled=%d, version=%d\n", __func__, 945 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 946 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version); 947 948 attrs.mpa_attr = ep->mpa_attr; 949 attrs.max_ird = ep->ird; 950 attrs.max_ord = ep->ord; 951 attrs.llp_stream_handle = ep; 952 attrs.next_state = C4IW_QP_STATE_RTS; 953 954 mask = C4IW_QP_ATTR_NEXT_STATE | 955 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | 956 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; 957 958 /* bind QP and TID with INIT_WR */ 959 err = c4iw_modify_qp(ep->com.qp->rhp, 960 ep->com.qp, mask, &attrs, 1); 961 if (err) 962 goto err; 963 goto out; 964 err: 965 state_set(&ep->com, ABORTING); 966 send_abort(ep, skb, GFP_KERNEL); 967 out: 968 connect_reply_upcall(ep, err); 969 return; 970 } 971 972 static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) 973 { 974 struct mpa_message *mpa; 975 u16 plen; 976 977 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 978 979 if (state_read(&ep->com) != MPA_REQ_WAIT) 980 return; 981 982 /* 983 * If we get more than the supported amount of private data 984 * then we must fail this connection. 985 */ 986 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { 987 stop_ep_timer(ep); 988 abort_connection(ep, skb, GFP_KERNEL); 989 return; 990 } 991 992 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); 993 994 /* 995 * Copy the new data into our accumulation buffer. 996 */ 997 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 998 skb->len); 999 ep->mpa_pkt_len += skb->len; 1000 1001 /* 1002 * If we don't even have the mpa message, then bail. 1003 * We'll continue process when more data arrives. 1004 */ 1005 if (ep->mpa_pkt_len < sizeof(*mpa)) 1006 return; 1007 1008 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); 1009 stop_ep_timer(ep); 1010 mpa = (struct mpa_message *) ep->mpa_pkt; 1011 1012 /* 1013 * Validate MPA Header. 1014 */ 1015 if (mpa->revision != mpa_rev) { 1016 abort_connection(ep, skb, GFP_KERNEL); 1017 return; 1018 } 1019 1020 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) { 1021 abort_connection(ep, skb, GFP_KERNEL); 1022 return; 1023 } 1024 1025 plen = ntohs(mpa->private_data_size); 1026 1027 /* 1028 * Fail if there's too much private data. 1029 */ 1030 if (plen > MPA_MAX_PRIVATE_DATA) { 1031 abort_connection(ep, skb, GFP_KERNEL); 1032 return; 1033 } 1034 1035 /* 1036 * If plen does not account for pkt size 1037 */ 1038 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 1039 abort_connection(ep, skb, GFP_KERNEL); 1040 return; 1041 } 1042 ep->plen = (u8) plen; 1043 1044 /* 1045 * If we don't have all the pdata yet, then bail. 1046 */ 1047 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 1048 return; 1049 1050 /* 1051 * If we get here we have accumulated the entire mpa 1052 * start reply message including private data. 1053 */ 1054 ep->mpa_attr.initiator = 0; 1055 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1056 ep->mpa_attr.recv_marker_enabled = markers_enabled; 1057 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1058 ep->mpa_attr.version = mpa_rev; 1059 ep->mpa_attr.p2p_type = peer2peer ? p2p_type : 1060 FW_RI_INIT_P2PTYPE_DISABLED; 1061 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " 1062 "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__, 1063 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 1064 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 1065 ep->mpa_attr.p2p_type); 1066 1067 state_set(&ep->com, MPA_REQ_RCVD); 1068 1069 /* drive upcall */ 1070 connect_request_upcall(ep); 1071 return; 1072 } 1073 1074 static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) 1075 { 1076 struct c4iw_ep *ep; 1077 struct cpl_rx_data *hdr = cplhdr(skb); 1078 unsigned int dlen = ntohs(hdr->len); 1079 unsigned int tid = GET_TID(hdr); 1080 struct tid_info *t = dev->rdev.lldi.tids; 1081 1082 ep = lookup_tid(t, tid); 1083 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen); 1084 skb_pull(skb, sizeof(*hdr)); 1085 skb_trim(skb, dlen); 1086 1087 ep->rcv_seq += dlen; 1088 BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen)); 1089 1090 /* update RX credits */ 1091 update_rx_credits(ep, dlen); 1092 1093 switch (state_read(&ep->com)) { 1094 case MPA_REQ_SENT: 1095 process_mpa_reply(ep, skb); 1096 break; 1097 case MPA_REQ_WAIT: 1098 process_mpa_request(ep, skb); 1099 break; 1100 case MPA_REP_SENT: 1101 break; 1102 default: 1103 printk(KERN_ERR MOD "%s Unexpected streaming data." 1104 " ep %p state %d tid %u\n", 1105 __func__, ep, state_read(&ep->com), ep->hwtid); 1106 1107 /* 1108 * The ep will timeout and inform the ULP of the failure. 1109 * See ep_timeout(). 1110 */ 1111 break; 1112 } 1113 return 0; 1114 } 1115 1116 static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1117 { 1118 struct c4iw_ep *ep; 1119 struct cpl_abort_rpl_rss *rpl = cplhdr(skb); 1120 int release = 0; 1121 unsigned int tid = GET_TID(rpl); 1122 struct tid_info *t = dev->rdev.lldi.tids; 1123 1124 ep = lookup_tid(t, tid); 1125 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1126 BUG_ON(!ep); 1127 mutex_lock(&ep->com.mutex); 1128 switch (ep->com.state) { 1129 case ABORTING: 1130 __state_set(&ep->com, DEAD); 1131 release = 1; 1132 break; 1133 default: 1134 printk(KERN_ERR "%s ep %p state %d\n", 1135 __func__, ep, ep->com.state); 1136 break; 1137 } 1138 mutex_unlock(&ep->com.mutex); 1139 1140 if (release) 1141 release_ep_resources(ep); 1142 return 0; 1143 } 1144 1145 /* 1146 * Return whether a failed active open has allocated a TID 1147 */ 1148 static inline int act_open_has_tid(int status) 1149 { 1150 return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && 1151 status != CPL_ERR_ARP_MISS; 1152 } 1153 1154 static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1155 { 1156 struct c4iw_ep *ep; 1157 struct cpl_act_open_rpl *rpl = cplhdr(skb); 1158 unsigned int atid = GET_TID_TID(GET_AOPEN_ATID( 1159 ntohl(rpl->atid_status))); 1160 struct tid_info *t = dev->rdev.lldi.tids; 1161 int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status)); 1162 1163 ep = lookup_atid(t, atid); 1164 1165 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, 1166 status, status2errno(status)); 1167 1168 if (status == CPL_ERR_RTX_NEG_ADVICE) { 1169 printk(KERN_WARNING MOD "Connection problems for atid %u\n", 1170 atid); 1171 return 0; 1172 } 1173 1174 connect_reply_upcall(ep, status2errno(status)); 1175 state_set(&ep->com, DEAD); 1176 1177 if (status && act_open_has_tid(status)) 1178 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); 1179 1180 cxgb4_free_atid(t, atid); 1181 dst_release(ep->dst); 1182 cxgb4_l2t_release(ep->l2t); 1183 c4iw_put_ep(&ep->com); 1184 1185 return 0; 1186 } 1187 1188 static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1189 { 1190 struct cpl_pass_open_rpl *rpl = cplhdr(skb); 1191 struct tid_info *t = dev->rdev.lldi.tids; 1192 unsigned int stid = GET_TID(rpl); 1193 struct c4iw_listen_ep *ep = lookup_stid(t, stid); 1194 1195 if (!ep) { 1196 printk(KERN_ERR MOD "stid %d lookup failure!\n", stid); 1197 return 0; 1198 } 1199 PDBG("%s ep %p status %d error %d\n", __func__, ep, 1200 rpl->status, status2errno(rpl->status)); 1201 ep->com.wr_wait.ret = status2errno(rpl->status); 1202 ep->com.wr_wait.done = 1; 1203 wake_up(&ep->com.wr_wait.wait); 1204 1205 return 0; 1206 } 1207 1208 static int listen_stop(struct c4iw_listen_ep *ep) 1209 { 1210 struct sk_buff *skb; 1211 struct cpl_close_listsvr_req *req; 1212 1213 PDBG("%s ep %p\n", __func__, ep); 1214 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 1215 if (!skb) { 1216 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); 1217 return -ENOMEM; 1218 } 1219 req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req)); 1220 INIT_TP_WR(req, 0); 1221 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, 1222 ep->stid)); 1223 req->reply_ctrl = cpu_to_be16( 1224 QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0])); 1225 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); 1226 return c4iw_ofld_send(&ep->com.dev->rdev, skb); 1227 } 1228 1229 static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1230 { 1231 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); 1232 struct tid_info *t = dev->rdev.lldi.tids; 1233 unsigned int stid = GET_TID(rpl); 1234 struct c4iw_listen_ep *ep = lookup_stid(t, stid); 1235 1236 PDBG("%s ep %p\n", __func__, ep); 1237 ep->com.wr_wait.ret = status2errno(rpl->status); 1238 ep->com.wr_wait.done = 1; 1239 wake_up(&ep->com.wr_wait.wait); 1240 return 0; 1241 } 1242 1243 static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb, 1244 struct cpl_pass_accept_req *req) 1245 { 1246 struct cpl_pass_accept_rpl *rpl; 1247 unsigned int mtu_idx; 1248 u64 opt0; 1249 u32 opt2; 1250 int wscale; 1251 1252 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1253 BUG_ON(skb_cloned(skb)); 1254 skb_trim(skb, sizeof(*rpl)); 1255 skb_get(skb); 1256 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 1257 wscale = compute_wscale(rcv_win); 1258 opt0 = KEEP_ALIVE(1) | 1259 DELACK(1) | 1260 WND_SCALE(wscale) | 1261 MSS_IDX(mtu_idx) | 1262 L2T_IDX(ep->l2t->idx) | 1263 TX_CHAN(ep->tx_chan) | 1264 SMAC_SEL(ep->smac_idx) | 1265 DSCP(ep->tos) | 1266 ULP_MODE(ULP_MODE_TCPDDP) | 1267 RCV_BUFSIZ(rcv_win>>10); 1268 opt2 = RX_CHANNEL(0) | 1269 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 1270 1271 if (enable_tcp_timestamps && req->tcpopt.tstamp) 1272 opt2 |= TSTAMPS_EN(1); 1273 if (enable_tcp_sack && req->tcpopt.sack) 1274 opt2 |= SACK_EN(1); 1275 if (wscale && enable_tcp_window_scaling) 1276 opt2 |= WND_SCALE_EN(1); 1277 1278 rpl = cplhdr(skb); 1279 INIT_TP_WR(rpl, ep->hwtid); 1280 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, 1281 ep->hwtid)); 1282 rpl->opt0 = cpu_to_be64(opt0); 1283 rpl->opt2 = cpu_to_be32(opt2); 1284 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 1285 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1286 1287 return; 1288 } 1289 1290 static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip, 1291 struct sk_buff *skb) 1292 { 1293 PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid, 1294 peer_ip); 1295 BUG_ON(skb_cloned(skb)); 1296 skb_trim(skb, sizeof(struct cpl_tid_release)); 1297 skb_get(skb); 1298 release_tid(&dev->rdev, hwtid, skb); 1299 return; 1300 } 1301 1302 static void get_4tuple(struct cpl_pass_accept_req *req, 1303 __be32 *local_ip, __be32 *peer_ip, 1304 __be16 *local_port, __be16 *peer_port) 1305 { 1306 int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len)); 1307 int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len)); 1308 struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len); 1309 struct tcphdr *tcp = (struct tcphdr *) 1310 ((u8 *)(req + 1) + eth_len + ip_len); 1311 1312 PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__, 1313 ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source), 1314 ntohs(tcp->dest)); 1315 1316 *peer_ip = ip->saddr; 1317 *local_ip = ip->daddr; 1318 *peer_port = tcp->source; 1319 *local_port = tcp->dest; 1320 1321 return; 1322 } 1323 1324 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) 1325 { 1326 struct c4iw_ep *child_ep, *parent_ep; 1327 struct cpl_pass_accept_req *req = cplhdr(skb); 1328 unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid)); 1329 struct tid_info *t = dev->rdev.lldi.tids; 1330 unsigned int hwtid = GET_TID(req); 1331 struct dst_entry *dst; 1332 struct l2t_entry *l2t; 1333 struct rtable *rt; 1334 __be32 local_ip, peer_ip; 1335 __be16 local_port, peer_port; 1336 struct net_device *pdev; 1337 u32 tx_chan, smac_idx; 1338 u16 rss_qid; 1339 u32 mtu; 1340 int step; 1341 int txq_idx, ctrlq_idx; 1342 1343 parent_ep = lookup_stid(t, stid); 1344 PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid); 1345 1346 get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port); 1347 1348 if (state_read(&parent_ep->com) != LISTEN) { 1349 printk(KERN_ERR "%s - listening ep not in LISTEN\n", 1350 __func__); 1351 goto reject; 1352 } 1353 1354 /* Find output route */ 1355 rt = find_route(dev, local_ip, peer_ip, local_port, peer_port, 1356 GET_POPEN_TOS(ntohl(req->tos_stid))); 1357 if (!rt) { 1358 printk(KERN_ERR MOD "%s - failed to find dst entry!\n", 1359 __func__); 1360 goto reject; 1361 } 1362 dst = &rt->dst; 1363 if (dst->neighbour->dev->flags & IFF_LOOPBACK) { 1364 pdev = ip_dev_find(&init_net, peer_ip); 1365 BUG_ON(!pdev); 1366 l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour, 1367 pdev, 0); 1368 mtu = pdev->mtu; 1369 tx_chan = cxgb4_port_chan(pdev); 1370 smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; 1371 step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; 1372 txq_idx = cxgb4_port_idx(pdev) * step; 1373 ctrlq_idx = cxgb4_port_idx(pdev); 1374 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; 1375 rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step]; 1376 dev_put(pdev); 1377 } else { 1378 l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour, 1379 dst->neighbour->dev, 0); 1380 mtu = dst_mtu(dst); 1381 tx_chan = cxgb4_port_chan(dst->neighbour->dev); 1382 smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1; 1383 step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; 1384 txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step; 1385 ctrlq_idx = cxgb4_port_idx(dst->neighbour->dev); 1386 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; 1387 rss_qid = dev->rdev.lldi.rxq_ids[ 1388 cxgb4_port_idx(dst->neighbour->dev) * step]; 1389 } 1390 if (!l2t) { 1391 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", 1392 __func__); 1393 dst_release(dst); 1394 goto reject; 1395 } 1396 1397 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); 1398 if (!child_ep) { 1399 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", 1400 __func__); 1401 cxgb4_l2t_release(l2t); 1402 dst_release(dst); 1403 goto reject; 1404 } 1405 state_set(&child_ep->com, CONNECTING); 1406 child_ep->com.dev = dev; 1407 child_ep->com.cm_id = NULL; 1408 child_ep->com.local_addr.sin_family = PF_INET; 1409 child_ep->com.local_addr.sin_port = local_port; 1410 child_ep->com.local_addr.sin_addr.s_addr = local_ip; 1411 child_ep->com.remote_addr.sin_family = PF_INET; 1412 child_ep->com.remote_addr.sin_port = peer_port; 1413 child_ep->com.remote_addr.sin_addr.s_addr = peer_ip; 1414 c4iw_get_ep(&parent_ep->com); 1415 child_ep->parent_ep = parent_ep; 1416 child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid)); 1417 child_ep->l2t = l2t; 1418 child_ep->dst = dst; 1419 child_ep->hwtid = hwtid; 1420 child_ep->tx_chan = tx_chan; 1421 child_ep->smac_idx = smac_idx; 1422 child_ep->rss_qid = rss_qid; 1423 child_ep->mtu = mtu; 1424 child_ep->txq_idx = txq_idx; 1425 child_ep->ctrlq_idx = ctrlq_idx; 1426 1427 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__, 1428 tx_chan, smac_idx, rss_qid); 1429 1430 init_timer(&child_ep->timer); 1431 cxgb4_insert_tid(t, child_ep, hwtid); 1432 accept_cr(child_ep, peer_ip, skb, req); 1433 goto out; 1434 reject: 1435 reject_cr(dev, hwtid, peer_ip, skb); 1436 out: 1437 return 0; 1438 } 1439 1440 static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb) 1441 { 1442 struct c4iw_ep *ep; 1443 struct cpl_pass_establish *req = cplhdr(skb); 1444 struct tid_info *t = dev->rdev.lldi.tids; 1445 unsigned int tid = GET_TID(req); 1446 1447 ep = lookup_tid(t, tid); 1448 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1449 ep->snd_seq = be32_to_cpu(req->snd_isn); 1450 ep->rcv_seq = be32_to_cpu(req->rcv_isn); 1451 1452 set_emss(ep, ntohs(req->tcp_opt)); 1453 1454 dst_confirm(ep->dst); 1455 state_set(&ep->com, MPA_REQ_WAIT); 1456 start_ep_timer(ep); 1457 send_flowc(ep, skb); 1458 1459 return 0; 1460 } 1461 1462 static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) 1463 { 1464 struct cpl_peer_close *hdr = cplhdr(skb); 1465 struct c4iw_ep *ep; 1466 struct c4iw_qp_attributes attrs; 1467 int disconnect = 1; 1468 int release = 0; 1469 int closing = 0; 1470 struct tid_info *t = dev->rdev.lldi.tids; 1471 unsigned int tid = GET_TID(hdr); 1472 1473 ep = lookup_tid(t, tid); 1474 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1475 dst_confirm(ep->dst); 1476 1477 mutex_lock(&ep->com.mutex); 1478 switch (ep->com.state) { 1479 case MPA_REQ_WAIT: 1480 __state_set(&ep->com, CLOSING); 1481 break; 1482 case MPA_REQ_SENT: 1483 __state_set(&ep->com, CLOSING); 1484 connect_reply_upcall(ep, -ECONNRESET); 1485 break; 1486 case MPA_REQ_RCVD: 1487 1488 /* 1489 * We're gonna mark this puppy DEAD, but keep 1490 * the reference on it until the ULP accepts or 1491 * rejects the CR. Also wake up anyone waiting 1492 * in rdma connection migration (see c4iw_accept_cr()). 1493 */ 1494 __state_set(&ep->com, CLOSING); 1495 ep->com.wr_wait.done = 1; 1496 ep->com.wr_wait.ret = -ECONNRESET; 1497 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 1498 wake_up(&ep->com.wr_wait.wait); 1499 break; 1500 case MPA_REP_SENT: 1501 __state_set(&ep->com, CLOSING); 1502 ep->com.wr_wait.done = 1; 1503 ep->com.wr_wait.ret = -ECONNRESET; 1504 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 1505 wake_up(&ep->com.wr_wait.wait); 1506 break; 1507 case FPDU_MODE: 1508 start_ep_timer(ep); 1509 __state_set(&ep->com, CLOSING); 1510 closing = 1; 1511 peer_close_upcall(ep); 1512 break; 1513 case ABORTING: 1514 disconnect = 0; 1515 break; 1516 case CLOSING: 1517 __state_set(&ep->com, MORIBUND); 1518 disconnect = 0; 1519 break; 1520 case MORIBUND: 1521 stop_ep_timer(ep); 1522 if (ep->com.cm_id && ep->com.qp) { 1523 attrs.next_state = C4IW_QP_STATE_IDLE; 1524 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1525 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1526 } 1527 close_complete_upcall(ep); 1528 __state_set(&ep->com, DEAD); 1529 release = 1; 1530 disconnect = 0; 1531 break; 1532 case DEAD: 1533 disconnect = 0; 1534 break; 1535 default: 1536 BUG_ON(1); 1537 } 1538 mutex_unlock(&ep->com.mutex); 1539 if (closing) { 1540 attrs.next_state = C4IW_QP_STATE_CLOSING; 1541 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1542 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1543 } 1544 if (disconnect) 1545 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 1546 if (release) 1547 release_ep_resources(ep); 1548 return 0; 1549 } 1550 1551 /* 1552 * Returns whether an ABORT_REQ_RSS message is a negative advice. 1553 */ 1554 static int is_neg_adv_abort(unsigned int status) 1555 { 1556 return status == CPL_ERR_RTX_NEG_ADVICE || 1557 status == CPL_ERR_PERSIST_NEG_ADVICE; 1558 } 1559 1560 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) 1561 { 1562 struct cpl_abort_req_rss *req = cplhdr(skb); 1563 struct c4iw_ep *ep; 1564 struct cpl_abort_rpl *rpl; 1565 struct sk_buff *rpl_skb; 1566 struct c4iw_qp_attributes attrs; 1567 int ret; 1568 int release = 0; 1569 struct tid_info *t = dev->rdev.lldi.tids; 1570 unsigned int tid = GET_TID(req); 1571 1572 ep = lookup_tid(t, tid); 1573 if (is_neg_adv_abort(req->status)) { 1574 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, 1575 ep->hwtid); 1576 return 0; 1577 } 1578 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, 1579 ep->com.state); 1580 1581 /* 1582 * Wake up any threads in rdma_init() or rdma_fini(). 1583 */ 1584 ep->com.wr_wait.done = 1; 1585 ep->com.wr_wait.ret = -ECONNRESET; 1586 wake_up(&ep->com.wr_wait.wait); 1587 1588 mutex_lock(&ep->com.mutex); 1589 switch (ep->com.state) { 1590 case CONNECTING: 1591 break; 1592 case MPA_REQ_WAIT: 1593 stop_ep_timer(ep); 1594 break; 1595 case MPA_REQ_SENT: 1596 stop_ep_timer(ep); 1597 connect_reply_upcall(ep, -ECONNRESET); 1598 break; 1599 case MPA_REP_SENT: 1600 break; 1601 case MPA_REQ_RCVD: 1602 break; 1603 case MORIBUND: 1604 case CLOSING: 1605 stop_ep_timer(ep); 1606 /*FALLTHROUGH*/ 1607 case FPDU_MODE: 1608 if (ep->com.cm_id && ep->com.qp) { 1609 attrs.next_state = C4IW_QP_STATE_ERROR; 1610 ret = c4iw_modify_qp(ep->com.qp->rhp, 1611 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 1612 &attrs, 1); 1613 if (ret) 1614 printk(KERN_ERR MOD 1615 "%s - qp <- error failed!\n", 1616 __func__); 1617 } 1618 peer_abort_upcall(ep); 1619 break; 1620 case ABORTING: 1621 break; 1622 case DEAD: 1623 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); 1624 mutex_unlock(&ep->com.mutex); 1625 return 0; 1626 default: 1627 BUG_ON(1); 1628 break; 1629 } 1630 dst_confirm(ep->dst); 1631 if (ep->com.state != ABORTING) { 1632 __state_set(&ep->com, DEAD); 1633 release = 1; 1634 } 1635 mutex_unlock(&ep->com.mutex); 1636 1637 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); 1638 if (!rpl_skb) { 1639 printk(KERN_ERR MOD "%s - cannot allocate skb!\n", 1640 __func__); 1641 release = 1; 1642 goto out; 1643 } 1644 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 1645 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); 1646 INIT_TP_WR(rpl, ep->hwtid); 1647 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); 1648 rpl->cmd = CPL_ABORT_NO_RST; 1649 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); 1650 out: 1651 if (release) 1652 release_ep_resources(ep); 1653 return 0; 1654 } 1655 1656 static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1657 { 1658 struct c4iw_ep *ep; 1659 struct c4iw_qp_attributes attrs; 1660 struct cpl_close_con_rpl *rpl = cplhdr(skb); 1661 int release = 0; 1662 struct tid_info *t = dev->rdev.lldi.tids; 1663 unsigned int tid = GET_TID(rpl); 1664 1665 ep = lookup_tid(t, tid); 1666 1667 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1668 BUG_ON(!ep); 1669 1670 /* The cm_id may be null if we failed to connect */ 1671 mutex_lock(&ep->com.mutex); 1672 switch (ep->com.state) { 1673 case CLOSING: 1674 __state_set(&ep->com, MORIBUND); 1675 break; 1676 case MORIBUND: 1677 stop_ep_timer(ep); 1678 if ((ep->com.cm_id) && (ep->com.qp)) { 1679 attrs.next_state = C4IW_QP_STATE_IDLE; 1680 c4iw_modify_qp(ep->com.qp->rhp, 1681 ep->com.qp, 1682 C4IW_QP_ATTR_NEXT_STATE, 1683 &attrs, 1); 1684 } 1685 close_complete_upcall(ep); 1686 __state_set(&ep->com, DEAD); 1687 release = 1; 1688 break; 1689 case ABORTING: 1690 case DEAD: 1691 break; 1692 default: 1693 BUG_ON(1); 1694 break; 1695 } 1696 mutex_unlock(&ep->com.mutex); 1697 if (release) 1698 release_ep_resources(ep); 1699 return 0; 1700 } 1701 1702 static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) 1703 { 1704 struct cpl_rdma_terminate *rpl = cplhdr(skb); 1705 struct tid_info *t = dev->rdev.lldi.tids; 1706 unsigned int tid = GET_TID(rpl); 1707 struct c4iw_ep *ep; 1708 struct c4iw_qp_attributes attrs; 1709 1710 ep = lookup_tid(t, tid); 1711 BUG_ON(!ep); 1712 1713 if (ep->com.qp) { 1714 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, 1715 ep->com.qp->wq.sq.qid); 1716 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1717 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1718 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1719 } else 1720 printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid); 1721 1722 return 0; 1723 } 1724 1725 /* 1726 * Upcall from the adapter indicating data has been transmitted. 1727 * For us its just the single MPA request or reply. We can now free 1728 * the skb holding the mpa message. 1729 */ 1730 static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) 1731 { 1732 struct c4iw_ep *ep; 1733 struct cpl_fw4_ack *hdr = cplhdr(skb); 1734 u8 credits = hdr->credits; 1735 unsigned int tid = GET_TID(hdr); 1736 struct tid_info *t = dev->rdev.lldi.tids; 1737 1738 1739 ep = lookup_tid(t, tid); 1740 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); 1741 if (credits == 0) { 1742 PDBG("%s 0 credit ack ep %p tid %u state %u\n", 1743 __func__, ep, ep->hwtid, state_read(&ep->com)); 1744 return 0; 1745 } 1746 1747 dst_confirm(ep->dst); 1748 if (ep->mpa_skb) { 1749 PDBG("%s last streaming msg ack ep %p tid %u state %u " 1750 "initiator %u freeing skb\n", __func__, ep, ep->hwtid, 1751 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0); 1752 kfree_skb(ep->mpa_skb); 1753 ep->mpa_skb = NULL; 1754 } 1755 return 0; 1756 } 1757 1758 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 1759 { 1760 int err; 1761 struct c4iw_ep *ep = to_ep(cm_id); 1762 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1763 1764 if (state_read(&ep->com) == DEAD) { 1765 c4iw_put_ep(&ep->com); 1766 return -ECONNRESET; 1767 } 1768 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 1769 if (mpa_rev == 0) 1770 abort_connection(ep, NULL, GFP_KERNEL); 1771 else { 1772 err = send_mpa_reject(ep, pdata, pdata_len); 1773 err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 1774 } 1775 c4iw_put_ep(&ep->com); 1776 return 0; 1777 } 1778 1779 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 1780 { 1781 int err; 1782 struct c4iw_qp_attributes attrs; 1783 enum c4iw_qp_attr_mask mask; 1784 struct c4iw_ep *ep = to_ep(cm_id); 1785 struct c4iw_dev *h = to_c4iw_dev(cm_id->device); 1786 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); 1787 1788 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1789 if (state_read(&ep->com) == DEAD) { 1790 err = -ECONNRESET; 1791 goto err; 1792 } 1793 1794 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 1795 BUG_ON(!qp); 1796 1797 if ((conn_param->ord > c4iw_max_read_depth) || 1798 (conn_param->ird > c4iw_max_read_depth)) { 1799 abort_connection(ep, NULL, GFP_KERNEL); 1800 err = -EINVAL; 1801 goto err; 1802 } 1803 1804 cm_id->add_ref(cm_id); 1805 ep->com.cm_id = cm_id; 1806 ep->com.qp = qp; 1807 1808 ep->ird = conn_param->ird; 1809 ep->ord = conn_param->ord; 1810 1811 if (peer2peer && ep->ird == 0) 1812 ep->ird = 1; 1813 1814 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); 1815 1816 /* bind QP to EP and move to RTS */ 1817 attrs.mpa_attr = ep->mpa_attr; 1818 attrs.max_ird = ep->ird; 1819 attrs.max_ord = ep->ord; 1820 attrs.llp_stream_handle = ep; 1821 attrs.next_state = C4IW_QP_STATE_RTS; 1822 1823 /* bind QP and TID with INIT_WR */ 1824 mask = C4IW_QP_ATTR_NEXT_STATE | 1825 C4IW_QP_ATTR_LLP_STREAM_HANDLE | 1826 C4IW_QP_ATTR_MPA_ATTR | 1827 C4IW_QP_ATTR_MAX_IRD | 1828 C4IW_QP_ATTR_MAX_ORD; 1829 1830 err = c4iw_modify_qp(ep->com.qp->rhp, 1831 ep->com.qp, mask, &attrs, 1); 1832 if (err) 1833 goto err1; 1834 err = send_mpa_reply(ep, conn_param->private_data, 1835 conn_param->private_data_len); 1836 if (err) 1837 goto err1; 1838 1839 state_set(&ep->com, FPDU_MODE); 1840 established_upcall(ep); 1841 c4iw_put_ep(&ep->com); 1842 return 0; 1843 err1: 1844 ep->com.cm_id = NULL; 1845 ep->com.qp = NULL; 1846 cm_id->rem_ref(cm_id); 1847 err: 1848 c4iw_put_ep(&ep->com); 1849 return err; 1850 } 1851 1852 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 1853 { 1854 int err = 0; 1855 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 1856 struct c4iw_ep *ep; 1857 struct rtable *rt; 1858 struct net_device *pdev; 1859 int step; 1860 1861 if ((conn_param->ord > c4iw_max_read_depth) || 1862 (conn_param->ird > c4iw_max_read_depth)) { 1863 err = -EINVAL; 1864 goto out; 1865 } 1866 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 1867 if (!ep) { 1868 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 1869 err = -ENOMEM; 1870 goto out; 1871 } 1872 init_timer(&ep->timer); 1873 ep->plen = conn_param->private_data_len; 1874 if (ep->plen) 1875 memcpy(ep->mpa_pkt + sizeof(struct mpa_message), 1876 conn_param->private_data, ep->plen); 1877 ep->ird = conn_param->ird; 1878 ep->ord = conn_param->ord; 1879 1880 if (peer2peer && ep->ord == 0) 1881 ep->ord = 1; 1882 1883 cm_id->add_ref(cm_id); 1884 ep->com.dev = dev; 1885 ep->com.cm_id = cm_id; 1886 ep->com.qp = get_qhp(dev, conn_param->qpn); 1887 BUG_ON(!ep->com.qp); 1888 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, 1889 ep->com.qp, cm_id); 1890 1891 /* 1892 * Allocate an active TID to initiate a TCP connection. 1893 */ 1894 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); 1895 if (ep->atid == -1) { 1896 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); 1897 err = -ENOMEM; 1898 goto fail2; 1899 } 1900 1901 PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__, 1902 ntohl(cm_id->local_addr.sin_addr.s_addr), 1903 ntohs(cm_id->local_addr.sin_port), 1904 ntohl(cm_id->remote_addr.sin_addr.s_addr), 1905 ntohs(cm_id->remote_addr.sin_port)); 1906 1907 /* find a route */ 1908 rt = find_route(dev, 1909 cm_id->local_addr.sin_addr.s_addr, 1910 cm_id->remote_addr.sin_addr.s_addr, 1911 cm_id->local_addr.sin_port, 1912 cm_id->remote_addr.sin_port, 0); 1913 if (!rt) { 1914 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); 1915 err = -EHOSTUNREACH; 1916 goto fail3; 1917 } 1918 ep->dst = &rt->dst; 1919 1920 /* get a l2t entry */ 1921 if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) { 1922 PDBG("%s LOOPBACK\n", __func__); 1923 pdev = ip_dev_find(&init_net, 1924 cm_id->remote_addr.sin_addr.s_addr); 1925 ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, 1926 ep->dst->neighbour, 1927 pdev, 0); 1928 ep->mtu = pdev->mtu; 1929 ep->tx_chan = cxgb4_port_chan(pdev); 1930 ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; 1931 step = ep->com.dev->rdev.lldi.ntxq / 1932 ep->com.dev->rdev.lldi.nchan; 1933 ep->txq_idx = cxgb4_port_idx(pdev) * step; 1934 step = ep->com.dev->rdev.lldi.nrxq / 1935 ep->com.dev->rdev.lldi.nchan; 1936 ep->ctrlq_idx = cxgb4_port_idx(pdev); 1937 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ 1938 cxgb4_port_idx(pdev) * step]; 1939 dev_put(pdev); 1940 } else { 1941 ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, 1942 ep->dst->neighbour, 1943 ep->dst->neighbour->dev, 0); 1944 ep->mtu = dst_mtu(ep->dst); 1945 ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev); 1946 ep->smac_idx = (cxgb4_port_viid(ep->dst->neighbour->dev) & 1947 0x7F) << 1; 1948 step = ep->com.dev->rdev.lldi.ntxq / 1949 ep->com.dev->rdev.lldi.nchan; 1950 ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step; 1951 ep->ctrlq_idx = cxgb4_port_idx(ep->dst->neighbour->dev); 1952 step = ep->com.dev->rdev.lldi.nrxq / 1953 ep->com.dev->rdev.lldi.nchan; 1954 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ 1955 cxgb4_port_idx(ep->dst->neighbour->dev) * step]; 1956 } 1957 if (!ep->l2t) { 1958 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 1959 err = -ENOMEM; 1960 goto fail4; 1961 } 1962 1963 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 1964 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 1965 ep->l2t->idx); 1966 1967 state_set(&ep->com, CONNECTING); 1968 ep->tos = 0; 1969 ep->com.local_addr = cm_id->local_addr; 1970 ep->com.remote_addr = cm_id->remote_addr; 1971 1972 /* send connect request to rnic */ 1973 err = send_connect(ep); 1974 if (!err) 1975 goto out; 1976 1977 cxgb4_l2t_release(ep->l2t); 1978 fail4: 1979 dst_release(ep->dst); 1980 fail3: 1981 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 1982 fail2: 1983 cm_id->rem_ref(cm_id); 1984 c4iw_put_ep(&ep->com); 1985 out: 1986 return err; 1987 } 1988 1989 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) 1990 { 1991 int err = 0; 1992 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 1993 struct c4iw_listen_ep *ep; 1994 1995 1996 might_sleep(); 1997 1998 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 1999 if (!ep) { 2000 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 2001 err = -ENOMEM; 2002 goto fail1; 2003 } 2004 PDBG("%s ep %p\n", __func__, ep); 2005 cm_id->add_ref(cm_id); 2006 ep->com.cm_id = cm_id; 2007 ep->com.dev = dev; 2008 ep->backlog = backlog; 2009 ep->com.local_addr = cm_id->local_addr; 2010 2011 /* 2012 * Allocate a server TID. 2013 */ 2014 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep); 2015 if (ep->stid == -1) { 2016 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__); 2017 err = -ENOMEM; 2018 goto fail2; 2019 } 2020 2021 state_set(&ep->com, LISTEN); 2022 c4iw_init_wr_wait(&ep->com.wr_wait); 2023 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid, 2024 ep->com.local_addr.sin_addr.s_addr, 2025 ep->com.local_addr.sin_port, 2026 ep->com.dev->rdev.lldi.rxq_ids[0]); 2027 if (err) 2028 goto fail3; 2029 2030 /* wait for pass_open_rpl */ 2031 err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0, 2032 __func__); 2033 if (!err) { 2034 cm_id->provider_data = ep; 2035 goto out; 2036 } 2037 fail3: 2038 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); 2039 fail2: 2040 cm_id->rem_ref(cm_id); 2041 c4iw_put_ep(&ep->com); 2042 fail1: 2043 out: 2044 return err; 2045 } 2046 2047 int c4iw_destroy_listen(struct iw_cm_id *cm_id) 2048 { 2049 int err; 2050 struct c4iw_listen_ep *ep = to_listen_ep(cm_id); 2051 2052 PDBG("%s ep %p\n", __func__, ep); 2053 2054 might_sleep(); 2055 state_set(&ep->com, DEAD); 2056 c4iw_init_wr_wait(&ep->com.wr_wait); 2057 err = listen_stop(ep); 2058 if (err) 2059 goto done; 2060 err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0, 2061 __func__); 2062 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); 2063 done: 2064 cm_id->rem_ref(cm_id); 2065 c4iw_put_ep(&ep->com); 2066 return err; 2067 } 2068 2069 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) 2070 { 2071 int ret = 0; 2072 int close = 0; 2073 int fatal = 0; 2074 struct c4iw_rdev *rdev; 2075 2076 mutex_lock(&ep->com.mutex); 2077 2078 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, 2079 states[ep->com.state], abrupt); 2080 2081 rdev = &ep->com.dev->rdev; 2082 if (c4iw_fatal_error(rdev)) { 2083 fatal = 1; 2084 close_complete_upcall(ep); 2085 ep->com.state = DEAD; 2086 } 2087 switch (ep->com.state) { 2088 case MPA_REQ_WAIT: 2089 case MPA_REQ_SENT: 2090 case MPA_REQ_RCVD: 2091 case MPA_REP_SENT: 2092 case FPDU_MODE: 2093 close = 1; 2094 if (abrupt) 2095 ep->com.state = ABORTING; 2096 else { 2097 ep->com.state = CLOSING; 2098 start_ep_timer(ep); 2099 } 2100 set_bit(CLOSE_SENT, &ep->com.flags); 2101 break; 2102 case CLOSING: 2103 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { 2104 close = 1; 2105 if (abrupt) { 2106 stop_ep_timer(ep); 2107 ep->com.state = ABORTING; 2108 } else 2109 ep->com.state = MORIBUND; 2110 } 2111 break; 2112 case MORIBUND: 2113 case ABORTING: 2114 case DEAD: 2115 PDBG("%s ignoring disconnect ep %p state %u\n", 2116 __func__, ep, ep->com.state); 2117 break; 2118 default: 2119 BUG(); 2120 break; 2121 } 2122 2123 mutex_unlock(&ep->com.mutex); 2124 if (close) { 2125 if (abrupt) 2126 ret = abort_connection(ep, NULL, gfp); 2127 else 2128 ret = send_halfclose(ep, gfp); 2129 if (ret) 2130 fatal = 1; 2131 } 2132 if (fatal) 2133 release_ep_resources(ep); 2134 return ret; 2135 } 2136 2137 static int async_event(struct c4iw_dev *dev, struct sk_buff *skb) 2138 { 2139 struct cpl_fw6_msg *rpl = cplhdr(skb); 2140 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); 2141 return 0; 2142 } 2143 2144 /* 2145 * These are the real handlers that are called from a 2146 * work queue. 2147 */ 2148 static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = { 2149 [CPL_ACT_ESTABLISH] = act_establish, 2150 [CPL_ACT_OPEN_RPL] = act_open_rpl, 2151 [CPL_RX_DATA] = rx_data, 2152 [CPL_ABORT_RPL_RSS] = abort_rpl, 2153 [CPL_ABORT_RPL] = abort_rpl, 2154 [CPL_PASS_OPEN_RPL] = pass_open_rpl, 2155 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl, 2156 [CPL_PASS_ACCEPT_REQ] = pass_accept_req, 2157 [CPL_PASS_ESTABLISH] = pass_establish, 2158 [CPL_PEER_CLOSE] = peer_close, 2159 [CPL_ABORT_REQ_RSS] = peer_abort, 2160 [CPL_CLOSE_CON_RPL] = close_con_rpl, 2161 [CPL_RDMA_TERMINATE] = terminate, 2162 [CPL_FW4_ACK] = fw4_ack, 2163 [CPL_FW6_MSG] = async_event 2164 }; 2165 2166 static void process_timeout(struct c4iw_ep *ep) 2167 { 2168 struct c4iw_qp_attributes attrs; 2169 int abort = 1; 2170 2171 mutex_lock(&ep->com.mutex); 2172 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, 2173 ep->com.state); 2174 switch (ep->com.state) { 2175 case MPA_REQ_SENT: 2176 __state_set(&ep->com, ABORTING); 2177 connect_reply_upcall(ep, -ETIMEDOUT); 2178 break; 2179 case MPA_REQ_WAIT: 2180 __state_set(&ep->com, ABORTING); 2181 break; 2182 case CLOSING: 2183 case MORIBUND: 2184 if (ep->com.cm_id && ep->com.qp) { 2185 attrs.next_state = C4IW_QP_STATE_ERROR; 2186 c4iw_modify_qp(ep->com.qp->rhp, 2187 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 2188 &attrs, 1); 2189 } 2190 __state_set(&ep->com, ABORTING); 2191 break; 2192 default: 2193 printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n", 2194 __func__, ep, ep->hwtid, ep->com.state); 2195 WARN_ON(1); 2196 abort = 0; 2197 } 2198 mutex_unlock(&ep->com.mutex); 2199 if (abort) 2200 abort_connection(ep, NULL, GFP_KERNEL); 2201 c4iw_put_ep(&ep->com); 2202 } 2203 2204 static void process_timedout_eps(void) 2205 { 2206 struct c4iw_ep *ep; 2207 2208 spin_lock_irq(&timeout_lock); 2209 while (!list_empty(&timeout_list)) { 2210 struct list_head *tmp; 2211 2212 tmp = timeout_list.next; 2213 list_del(tmp); 2214 spin_unlock_irq(&timeout_lock); 2215 ep = list_entry(tmp, struct c4iw_ep, entry); 2216 process_timeout(ep); 2217 spin_lock_irq(&timeout_lock); 2218 } 2219 spin_unlock_irq(&timeout_lock); 2220 } 2221 2222 static void process_work(struct work_struct *work) 2223 { 2224 struct sk_buff *skb = NULL; 2225 struct c4iw_dev *dev; 2226 struct cpl_act_establish *rpl; 2227 unsigned int opcode; 2228 int ret; 2229 2230 while ((skb = skb_dequeue(&rxq))) { 2231 rpl = cplhdr(skb); 2232 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); 2233 opcode = rpl->ot.opcode; 2234 2235 BUG_ON(!work_handlers[opcode]); 2236 ret = work_handlers[opcode](dev, skb); 2237 if (!ret) 2238 kfree_skb(skb); 2239 } 2240 process_timedout_eps(); 2241 } 2242 2243 static DECLARE_WORK(skb_work, process_work); 2244 2245 static void ep_timeout(unsigned long arg) 2246 { 2247 struct c4iw_ep *ep = (struct c4iw_ep *)arg; 2248 2249 spin_lock(&timeout_lock); 2250 list_add_tail(&ep->entry, &timeout_list); 2251 spin_unlock(&timeout_lock); 2252 queue_work(workq, &skb_work); 2253 } 2254 2255 /* 2256 * All the CM events are handled on a work queue to have a safe context. 2257 */ 2258 static int sched(struct c4iw_dev *dev, struct sk_buff *skb) 2259 { 2260 2261 /* 2262 * Save dev in the skb->cb area. 2263 */ 2264 *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev; 2265 2266 /* 2267 * Queue the skb and schedule the worker thread. 2268 */ 2269 skb_queue_tail(&rxq, skb); 2270 queue_work(workq, &skb_work); 2271 return 0; 2272 } 2273 2274 static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2275 { 2276 struct cpl_set_tcb_rpl *rpl = cplhdr(skb); 2277 2278 if (rpl->status != CPL_ERR_NONE) { 2279 printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u " 2280 "for tid %u\n", rpl->status, GET_TID(rpl)); 2281 } 2282 kfree_skb(skb); 2283 return 0; 2284 } 2285 2286 static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) 2287 { 2288 struct cpl_fw6_msg *rpl = cplhdr(skb); 2289 struct c4iw_wr_wait *wr_waitp; 2290 int ret; 2291 2292 PDBG("%s type %u\n", __func__, rpl->type); 2293 2294 switch (rpl->type) { 2295 case 1: 2296 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); 2297 wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; 2298 PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); 2299 if (wr_waitp) { 2300 if (ret) 2301 wr_waitp->ret = -ret; 2302 else 2303 wr_waitp->ret = 0; 2304 wr_waitp->done = 1; 2305 wake_up(&wr_waitp->wait); 2306 } 2307 kfree_skb(skb); 2308 break; 2309 case 2: 2310 sched(dev, skb); 2311 break; 2312 default: 2313 printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__, 2314 rpl->type); 2315 kfree_skb(skb); 2316 break; 2317 } 2318 return 0; 2319 } 2320 2321 /* 2322 * Most upcalls from the T4 Core go to sched() to 2323 * schedule the processing on a work queue. 2324 */ 2325 c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = { 2326 [CPL_ACT_ESTABLISH] = sched, 2327 [CPL_ACT_OPEN_RPL] = sched, 2328 [CPL_RX_DATA] = sched, 2329 [CPL_ABORT_RPL_RSS] = sched, 2330 [CPL_ABORT_RPL] = sched, 2331 [CPL_PASS_OPEN_RPL] = sched, 2332 [CPL_CLOSE_LISTSRV_RPL] = sched, 2333 [CPL_PASS_ACCEPT_REQ] = sched, 2334 [CPL_PASS_ESTABLISH] = sched, 2335 [CPL_PEER_CLOSE] = sched, 2336 [CPL_CLOSE_CON_RPL] = sched, 2337 [CPL_ABORT_REQ_RSS] = sched, 2338 [CPL_RDMA_TERMINATE] = sched, 2339 [CPL_FW4_ACK] = sched, 2340 [CPL_SET_TCB_RPL] = set_tcb_rpl, 2341 [CPL_FW6_MSG] = fw6_msg 2342 }; 2343 2344 int __init c4iw_cm_init(void) 2345 { 2346 spin_lock_init(&timeout_lock); 2347 skb_queue_head_init(&rxq); 2348 2349 workq = create_singlethread_workqueue("iw_cxgb4"); 2350 if (!workq) 2351 return -ENOMEM; 2352 2353 return 0; 2354 } 2355 2356 void __exit c4iw_cm_term(void) 2357 { 2358 WARN_ON(!list_empty(&timeout_list)); 2359 flush_workqueue(workq); 2360 destroy_workqueue(workq); 2361 } 2362