1cfdda9d7SSteve Wise /* 2cfdda9d7SSteve Wise * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. 3cfdda9d7SSteve Wise * 4cfdda9d7SSteve Wise * This software is available to you under a choice of one of two 5cfdda9d7SSteve Wise * licenses. You may choose to be licensed under the terms of the GNU 6cfdda9d7SSteve Wise * General Public License (GPL) Version 2, available from the file 7cfdda9d7SSteve Wise * COPYING in the main directory of this source tree, or the 8cfdda9d7SSteve Wise * OpenIB.org BSD license below: 9cfdda9d7SSteve Wise * 10cfdda9d7SSteve Wise * Redistribution and use in source and binary forms, with or 11cfdda9d7SSteve Wise * without modification, are permitted provided that the following 12cfdda9d7SSteve Wise * conditions are met: 13cfdda9d7SSteve Wise * 14cfdda9d7SSteve Wise * - Redistributions of source code must retain the above 15cfdda9d7SSteve Wise * copyright notice, this list of conditions and the following 16cfdda9d7SSteve Wise * disclaimer. 17cfdda9d7SSteve Wise * 18cfdda9d7SSteve Wise * - Redistributions in binary form must reproduce the above 19cfdda9d7SSteve Wise * copyright notice, this list of conditions and the following 20cfdda9d7SSteve Wise * disclaimer in the documentation and/or other materials 21cfdda9d7SSteve Wise * provided with the distribution. 22cfdda9d7SSteve Wise * 23cfdda9d7SSteve Wise * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24cfdda9d7SSteve Wise * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25cfdda9d7SSteve Wise * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26cfdda9d7SSteve Wise * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27cfdda9d7SSteve Wise * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28cfdda9d7SSteve Wise * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29cfdda9d7SSteve Wise * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30cfdda9d7SSteve Wise * SOFTWARE. 
31cfdda9d7SSteve Wise */ 32cfdda9d7SSteve Wise #include <linux/module.h> 33cfdda9d7SSteve Wise #include <linux/list.h> 34cfdda9d7SSteve Wise #include <linux/workqueue.h> 35cfdda9d7SSteve Wise #include <linux/skbuff.h> 36cfdda9d7SSteve Wise #include <linux/timer.h> 37cfdda9d7SSteve Wise #include <linux/notifier.h> 38cfdda9d7SSteve Wise #include <linux/inetdevice.h> 39cfdda9d7SSteve Wise #include <linux/ip.h> 40cfdda9d7SSteve Wise #include <linux/tcp.h> 41cfdda9d7SSteve Wise 42cfdda9d7SSteve Wise #include <net/neighbour.h> 43cfdda9d7SSteve Wise #include <net/netevent.h> 44cfdda9d7SSteve Wise #include <net/route.h> 45cfdda9d7SSteve Wise 46cfdda9d7SSteve Wise #include "iw_cxgb4.h" 47cfdda9d7SSteve Wise 48cfdda9d7SSteve Wise static char *states[] = { 49cfdda9d7SSteve Wise "idle", 50cfdda9d7SSteve Wise "listen", 51cfdda9d7SSteve Wise "connecting", 52cfdda9d7SSteve Wise "mpa_wait_req", 53cfdda9d7SSteve Wise "mpa_req_sent", 54cfdda9d7SSteve Wise "mpa_req_rcvd", 55cfdda9d7SSteve Wise "mpa_rep_sent", 56cfdda9d7SSteve Wise "fpdu_mode", 57cfdda9d7SSteve Wise "aborting", 58cfdda9d7SSteve Wise "closing", 59cfdda9d7SSteve Wise "moribund", 60cfdda9d7SSteve Wise "dead", 61cfdda9d7SSteve Wise NULL, 62cfdda9d7SSteve Wise }; 63cfdda9d7SSteve Wise 64ba6d3925SSteve Wise static int dack_mode; 65ba6d3925SSteve Wise module_param(dack_mode, int, 0644); 66ba6d3925SSteve Wise MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)"); 67ba6d3925SSteve Wise 68be4c9badSRoland Dreier int c4iw_max_read_depth = 8; 69be4c9badSRoland Dreier module_param(c4iw_max_read_depth, int, 0644); 70be4c9badSRoland Dreier MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)"); 71be4c9badSRoland Dreier 72cfdda9d7SSteve Wise static int enable_tcp_timestamps; 73cfdda9d7SSteve Wise module_param(enable_tcp_timestamps, int, 0644); 74cfdda9d7SSteve Wise MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)"); 75cfdda9d7SSteve Wise 76cfdda9d7SSteve Wise static int 
enable_tcp_sack; 77cfdda9d7SSteve Wise module_param(enable_tcp_sack, int, 0644); 78cfdda9d7SSteve Wise MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)"); 79cfdda9d7SSteve Wise 80cfdda9d7SSteve Wise static int enable_tcp_window_scaling = 1; 81cfdda9d7SSteve Wise module_param(enable_tcp_window_scaling, int, 0644); 82cfdda9d7SSteve Wise MODULE_PARM_DESC(enable_tcp_window_scaling, 83cfdda9d7SSteve Wise "Enable tcp window scaling (default=1)"); 84cfdda9d7SSteve Wise 85cfdda9d7SSteve Wise int c4iw_debug; 86cfdda9d7SSteve Wise module_param(c4iw_debug, int, 0644); 87cfdda9d7SSteve Wise MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)"); 88cfdda9d7SSteve Wise 89cfdda9d7SSteve Wise static int peer2peer; 90cfdda9d7SSteve Wise module_param(peer2peer, int, 0644); 91cfdda9d7SSteve Wise MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)"); 92cfdda9d7SSteve Wise 93cfdda9d7SSteve Wise static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; 94cfdda9d7SSteve Wise module_param(p2p_type, int, 0644); 95cfdda9d7SSteve Wise MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: " 96cfdda9d7SSteve Wise "1=RDMA_READ 0=RDMA_WRITE (default 1)"); 97cfdda9d7SSteve Wise 98cfdda9d7SSteve Wise static int ep_timeout_secs = 60; 99cfdda9d7SSteve Wise module_param(ep_timeout_secs, int, 0644); 100cfdda9d7SSteve Wise MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout " 101cfdda9d7SSteve Wise "in seconds (default=60)"); 102cfdda9d7SSteve Wise 103cfdda9d7SSteve Wise static int mpa_rev = 1; 104cfdda9d7SSteve Wise module_param(mpa_rev, int, 0644); 105cfdda9d7SSteve Wise MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, " 106cfdda9d7SSteve Wise "1 is spec compliant. 
(default=1)"); 107cfdda9d7SSteve Wise 108cfdda9d7SSteve Wise static int markers_enabled; 109cfdda9d7SSteve Wise module_param(markers_enabled, int, 0644); 110cfdda9d7SSteve Wise MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)"); 111cfdda9d7SSteve Wise 112cfdda9d7SSteve Wise static int crc_enabled = 1; 113cfdda9d7SSteve Wise module_param(crc_enabled, int, 0644); 114cfdda9d7SSteve Wise MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)"); 115cfdda9d7SSteve Wise 116cfdda9d7SSteve Wise static int rcv_win = 256 * 1024; 117cfdda9d7SSteve Wise module_param(rcv_win, int, 0644); 118cfdda9d7SSteve Wise MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)"); 119cfdda9d7SSteve Wise 120cfdda9d7SSteve Wise static int snd_win = 32 * 1024; 121cfdda9d7SSteve Wise module_param(snd_win, int, 0644); 122cfdda9d7SSteve Wise MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)"); 123cfdda9d7SSteve Wise 124cfdda9d7SSteve Wise static struct workqueue_struct *workq; 125cfdda9d7SSteve Wise 126cfdda9d7SSteve Wise static struct sk_buff_head rxq; 127cfdda9d7SSteve Wise 128cfdda9d7SSteve Wise static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp); 129cfdda9d7SSteve Wise static void ep_timeout(unsigned long arg); 130cfdda9d7SSteve Wise static void connect_reply_upcall(struct c4iw_ep *ep, int status); 131cfdda9d7SSteve Wise 132be4c9badSRoland Dreier static LIST_HEAD(timeout_list); 133be4c9badSRoland Dreier static spinlock_t timeout_lock; 134be4c9badSRoland Dreier 135cfdda9d7SSteve Wise static void start_ep_timer(struct c4iw_ep *ep) 136cfdda9d7SSteve Wise { 137cfdda9d7SSteve Wise PDBG("%s ep %p\n", __func__, ep); 138cfdda9d7SSteve Wise if (timer_pending(&ep->timer)) { 139cfdda9d7SSteve Wise PDBG("%s stopped / restarted timer ep %p\n", __func__, ep); 140cfdda9d7SSteve Wise del_timer_sync(&ep->timer); 141cfdda9d7SSteve Wise } else 142cfdda9d7SSteve Wise c4iw_get_ep(&ep->com); 143cfdda9d7SSteve Wise 
ep->timer.expires = jiffies + ep_timeout_secs * HZ; 144cfdda9d7SSteve Wise ep->timer.data = (unsigned long)ep; 145cfdda9d7SSteve Wise ep->timer.function = ep_timeout; 146cfdda9d7SSteve Wise add_timer(&ep->timer); 147cfdda9d7SSteve Wise } 148cfdda9d7SSteve Wise 149cfdda9d7SSteve Wise static void stop_ep_timer(struct c4iw_ep *ep) 150cfdda9d7SSteve Wise { 151cfdda9d7SSteve Wise PDBG("%s ep %p\n", __func__, ep); 152cfdda9d7SSteve Wise if (!timer_pending(&ep->timer)) { 153cfdda9d7SSteve Wise printk(KERN_ERR "%s timer stopped when its not running! " 154cfdda9d7SSteve Wise "ep %p state %u\n", __func__, ep, ep->com.state); 155cfdda9d7SSteve Wise WARN_ON(1); 156cfdda9d7SSteve Wise return; 157cfdda9d7SSteve Wise } 158cfdda9d7SSteve Wise del_timer_sync(&ep->timer); 159cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 160cfdda9d7SSteve Wise } 161cfdda9d7SSteve Wise 162cfdda9d7SSteve Wise static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb, 163cfdda9d7SSteve Wise struct l2t_entry *l2e) 164cfdda9d7SSteve Wise { 165cfdda9d7SSteve Wise int error = 0; 166cfdda9d7SSteve Wise 167cfdda9d7SSteve Wise if (c4iw_fatal_error(rdev)) { 168cfdda9d7SSteve Wise kfree_skb(skb); 169cfdda9d7SSteve Wise PDBG("%s - device in error state - dropping\n", __func__); 170cfdda9d7SSteve Wise return -EIO; 171cfdda9d7SSteve Wise } 172cfdda9d7SSteve Wise error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e); 173cfdda9d7SSteve Wise if (error < 0) 174cfdda9d7SSteve Wise kfree_skb(skb); 175cfdda9d7SSteve Wise return error; 176cfdda9d7SSteve Wise } 177cfdda9d7SSteve Wise 178cfdda9d7SSteve Wise int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb) 179cfdda9d7SSteve Wise { 180cfdda9d7SSteve Wise int error = 0; 181cfdda9d7SSteve Wise 182cfdda9d7SSteve Wise if (c4iw_fatal_error(rdev)) { 183cfdda9d7SSteve Wise kfree_skb(skb); 184cfdda9d7SSteve Wise PDBG("%s - device in error state - dropping\n", __func__); 185cfdda9d7SSteve Wise return -EIO; 186cfdda9d7SSteve Wise } 187cfdda9d7SSteve Wise error = 
cxgb4_ofld_send(rdev->lldi.ports[0], skb); 188cfdda9d7SSteve Wise if (error < 0) 189cfdda9d7SSteve Wise kfree_skb(skb); 190cfdda9d7SSteve Wise return error; 191cfdda9d7SSteve Wise } 192cfdda9d7SSteve Wise 193cfdda9d7SSteve Wise static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb) 194cfdda9d7SSteve Wise { 195cfdda9d7SSteve Wise struct cpl_tid_release *req; 196cfdda9d7SSteve Wise 197cfdda9d7SSteve Wise skb = get_skb(skb, sizeof *req, GFP_KERNEL); 198cfdda9d7SSteve Wise if (!skb) 199cfdda9d7SSteve Wise return; 200cfdda9d7SSteve Wise req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req)); 201cfdda9d7SSteve Wise INIT_TP_WR(req, hwtid); 202cfdda9d7SSteve Wise OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); 203cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); 204cfdda9d7SSteve Wise c4iw_ofld_send(rdev, skb); 205cfdda9d7SSteve Wise return; 206cfdda9d7SSteve Wise } 207cfdda9d7SSteve Wise 208cfdda9d7SSteve Wise static void set_emss(struct c4iw_ep *ep, u16 opt) 209cfdda9d7SSteve Wise { 210cfdda9d7SSteve Wise ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40; 211cfdda9d7SSteve Wise ep->mss = ep->emss; 212cfdda9d7SSteve Wise if (GET_TCPOPT_TSTAMP(opt)) 213cfdda9d7SSteve Wise ep->emss -= 12; 214cfdda9d7SSteve Wise if (ep->emss < 128) 215cfdda9d7SSteve Wise ep->emss = 128; 216cfdda9d7SSteve Wise PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt), 217cfdda9d7SSteve Wise ep->mss, ep->emss); 218cfdda9d7SSteve Wise } 219cfdda9d7SSteve Wise 220cfdda9d7SSteve Wise static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc) 221cfdda9d7SSteve Wise { 222cfdda9d7SSteve Wise unsigned long flags; 223cfdda9d7SSteve Wise enum c4iw_ep_state state; 224cfdda9d7SSteve Wise 225cfdda9d7SSteve Wise spin_lock_irqsave(&epc->lock, flags); 226cfdda9d7SSteve Wise state = epc->state; 227cfdda9d7SSteve Wise spin_unlock_irqrestore(&epc->lock, flags); 228cfdda9d7SSteve Wise return state; 229cfdda9d7SSteve 
Wise } 230cfdda9d7SSteve Wise 231cfdda9d7SSteve Wise static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) 232cfdda9d7SSteve Wise { 233cfdda9d7SSteve Wise epc->state = new; 234cfdda9d7SSteve Wise } 235cfdda9d7SSteve Wise 236cfdda9d7SSteve Wise static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) 237cfdda9d7SSteve Wise { 238cfdda9d7SSteve Wise unsigned long flags; 239cfdda9d7SSteve Wise 240cfdda9d7SSteve Wise spin_lock_irqsave(&epc->lock, flags); 241cfdda9d7SSteve Wise PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]); 242cfdda9d7SSteve Wise __state_set(epc, new); 243cfdda9d7SSteve Wise spin_unlock_irqrestore(&epc->lock, flags); 244cfdda9d7SSteve Wise return; 245cfdda9d7SSteve Wise } 246cfdda9d7SSteve Wise 247cfdda9d7SSteve Wise static void *alloc_ep(int size, gfp_t gfp) 248cfdda9d7SSteve Wise { 249cfdda9d7SSteve Wise struct c4iw_ep_common *epc; 250cfdda9d7SSteve Wise 251cfdda9d7SSteve Wise epc = kzalloc(size, gfp); 252cfdda9d7SSteve Wise if (epc) { 253cfdda9d7SSteve Wise kref_init(&epc->kref); 254cfdda9d7SSteve Wise spin_lock_init(&epc->lock); 255cfdda9d7SSteve Wise init_waitqueue_head(&epc->waitq); 256cfdda9d7SSteve Wise } 257cfdda9d7SSteve Wise PDBG("%s alloc ep %p\n", __func__, epc); 258cfdda9d7SSteve Wise return epc; 259cfdda9d7SSteve Wise } 260cfdda9d7SSteve Wise 261cfdda9d7SSteve Wise void _c4iw_free_ep(struct kref *kref) 262cfdda9d7SSteve Wise { 263cfdda9d7SSteve Wise struct c4iw_ep *ep; 264cfdda9d7SSteve Wise 265cfdda9d7SSteve Wise ep = container_of(kref, struct c4iw_ep, com.kref); 266cfdda9d7SSteve Wise PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]); 267cfdda9d7SSteve Wise if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { 268cfdda9d7SSteve Wise cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); 269cfdda9d7SSteve Wise dst_release(ep->dst); 270cfdda9d7SSteve Wise cxgb4_l2t_release(ep->l2t); 271cfdda9d7SSteve Wise } 272cfdda9d7SSteve Wise kfree(ep); 
273cfdda9d7SSteve Wise } 274cfdda9d7SSteve Wise 275cfdda9d7SSteve Wise static void release_ep_resources(struct c4iw_ep *ep) 276cfdda9d7SSteve Wise { 277cfdda9d7SSteve Wise set_bit(RELEASE_RESOURCES, &ep->com.flags); 278cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 279cfdda9d7SSteve Wise } 280cfdda9d7SSteve Wise 281cfdda9d7SSteve Wise static int status2errno(int status) 282cfdda9d7SSteve Wise { 283cfdda9d7SSteve Wise switch (status) { 284cfdda9d7SSteve Wise case CPL_ERR_NONE: 285cfdda9d7SSteve Wise return 0; 286cfdda9d7SSteve Wise case CPL_ERR_CONN_RESET: 287cfdda9d7SSteve Wise return -ECONNRESET; 288cfdda9d7SSteve Wise case CPL_ERR_ARP_MISS: 289cfdda9d7SSteve Wise return -EHOSTUNREACH; 290cfdda9d7SSteve Wise case CPL_ERR_CONN_TIMEDOUT: 291cfdda9d7SSteve Wise return -ETIMEDOUT; 292cfdda9d7SSteve Wise case CPL_ERR_TCAM_FULL: 293cfdda9d7SSteve Wise return -ENOMEM; 294cfdda9d7SSteve Wise case CPL_ERR_CONN_EXIST: 295cfdda9d7SSteve Wise return -EADDRINUSE; 296cfdda9d7SSteve Wise default: 297cfdda9d7SSteve Wise return -EIO; 298cfdda9d7SSteve Wise } 299cfdda9d7SSteve Wise } 300cfdda9d7SSteve Wise 301cfdda9d7SSteve Wise /* 302cfdda9d7SSteve Wise * Try and reuse skbs already allocated... 
303cfdda9d7SSteve Wise */ 304cfdda9d7SSteve Wise static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp) 305cfdda9d7SSteve Wise { 306cfdda9d7SSteve Wise if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) { 307cfdda9d7SSteve Wise skb_trim(skb, 0); 308cfdda9d7SSteve Wise skb_get(skb); 309cfdda9d7SSteve Wise skb_reset_transport_header(skb); 310cfdda9d7SSteve Wise } else { 311cfdda9d7SSteve Wise skb = alloc_skb(len, gfp); 312cfdda9d7SSteve Wise } 313cfdda9d7SSteve Wise return skb; 314cfdda9d7SSteve Wise } 315cfdda9d7SSteve Wise 316cfdda9d7SSteve Wise static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip, 317cfdda9d7SSteve Wise __be32 peer_ip, __be16 local_port, 318cfdda9d7SSteve Wise __be16 peer_port, u8 tos) 319cfdda9d7SSteve Wise { 320cfdda9d7SSteve Wise struct rtable *rt; 321cfdda9d7SSteve Wise struct flowi fl = { 322cfdda9d7SSteve Wise .oif = 0, 323cfdda9d7SSteve Wise .nl_u = { 324cfdda9d7SSteve Wise .ip4_u = { 325cfdda9d7SSteve Wise .daddr = peer_ip, 326cfdda9d7SSteve Wise .saddr = local_ip, 327cfdda9d7SSteve Wise .tos = tos} 328cfdda9d7SSteve Wise }, 329cfdda9d7SSteve Wise .proto = IPPROTO_TCP, 330cfdda9d7SSteve Wise .uli_u = { 331cfdda9d7SSteve Wise .ports = { 332cfdda9d7SSteve Wise .sport = local_port, 333cfdda9d7SSteve Wise .dport = peer_port} 334cfdda9d7SSteve Wise } 335cfdda9d7SSteve Wise }; 336cfdda9d7SSteve Wise 337cfdda9d7SSteve Wise if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0)) 338cfdda9d7SSteve Wise return NULL; 339cfdda9d7SSteve Wise return rt; 340cfdda9d7SSteve Wise } 341cfdda9d7SSteve Wise 342cfdda9d7SSteve Wise static void arp_failure_discard(void *handle, struct sk_buff *skb) 343cfdda9d7SSteve Wise { 344cfdda9d7SSteve Wise PDBG("%s c4iw_dev %p\n", __func__, handle); 345cfdda9d7SSteve Wise kfree_skb(skb); 346cfdda9d7SSteve Wise } 347cfdda9d7SSteve Wise 348cfdda9d7SSteve Wise /* 349cfdda9d7SSteve Wise * Handle an ARP failure for an active open. 
350cfdda9d7SSteve Wise */ 351cfdda9d7SSteve Wise static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) 352cfdda9d7SSteve Wise { 353cfdda9d7SSteve Wise printk(KERN_ERR MOD "ARP failure duing connect\n"); 354cfdda9d7SSteve Wise kfree_skb(skb); 355cfdda9d7SSteve Wise } 356cfdda9d7SSteve Wise 357cfdda9d7SSteve Wise /* 358cfdda9d7SSteve Wise * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant 359cfdda9d7SSteve Wise * and send it along. 360cfdda9d7SSteve Wise */ 361cfdda9d7SSteve Wise static void abort_arp_failure(void *handle, struct sk_buff *skb) 362cfdda9d7SSteve Wise { 363cfdda9d7SSteve Wise struct c4iw_rdev *rdev = handle; 364cfdda9d7SSteve Wise struct cpl_abort_req *req = cplhdr(skb); 365cfdda9d7SSteve Wise 366cfdda9d7SSteve Wise PDBG("%s rdev %p\n", __func__, rdev); 367cfdda9d7SSteve Wise req->cmd = CPL_ABORT_NO_RST; 368cfdda9d7SSteve Wise c4iw_ofld_send(rdev, skb); 369cfdda9d7SSteve Wise } 370cfdda9d7SSteve Wise 371cfdda9d7SSteve Wise static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) 372cfdda9d7SSteve Wise { 373cfdda9d7SSteve Wise unsigned int flowclen = 80; 374cfdda9d7SSteve Wise struct fw_flowc_wr *flowc; 375cfdda9d7SSteve Wise int i; 376cfdda9d7SSteve Wise 377cfdda9d7SSteve Wise skb = get_skb(skb, flowclen, GFP_KERNEL); 378cfdda9d7SSteve Wise flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen); 379cfdda9d7SSteve Wise 380cfdda9d7SSteve Wise flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) | 381cfdda9d7SSteve Wise FW_FLOWC_WR_NPARAMS(8)); 382cfdda9d7SSteve Wise flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen, 383cfdda9d7SSteve Wise 16)) | FW_WR_FLOWID(ep->hwtid)); 384cfdda9d7SSteve Wise 385cfdda9d7SSteve Wise flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 386cfdda9d7SSteve Wise flowc->mnemval[0].val = cpu_to_be32(0); 387cfdda9d7SSteve Wise flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 388cfdda9d7SSteve Wise flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); 
389cfdda9d7SSteve Wise flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 390cfdda9d7SSteve Wise flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan); 391cfdda9d7SSteve Wise flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 392cfdda9d7SSteve Wise flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid); 393cfdda9d7SSteve Wise flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; 394cfdda9d7SSteve Wise flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq); 395cfdda9d7SSteve Wise flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; 396cfdda9d7SSteve Wise flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq); 397cfdda9d7SSteve Wise flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; 398cfdda9d7SSteve Wise flowc->mnemval[6].val = cpu_to_be32(snd_win); 399cfdda9d7SSteve Wise flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; 400cfdda9d7SSteve Wise flowc->mnemval[7].val = cpu_to_be32(ep->emss); 401cfdda9d7SSteve Wise /* Pad WR to 16 byte boundary */ 402cfdda9d7SSteve Wise flowc->mnemval[8].mnemonic = 0; 403cfdda9d7SSteve Wise flowc->mnemval[8].val = 0; 404cfdda9d7SSteve Wise for (i = 0; i < 9; i++) { 405cfdda9d7SSteve Wise flowc->mnemval[i].r4[0] = 0; 406cfdda9d7SSteve Wise flowc->mnemval[i].r4[1] = 0; 407cfdda9d7SSteve Wise flowc->mnemval[i].r4[2] = 0; 408cfdda9d7SSteve Wise } 409cfdda9d7SSteve Wise 410cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 411cfdda9d7SSteve Wise c4iw_ofld_send(&ep->com.dev->rdev, skb); 412cfdda9d7SSteve Wise } 413cfdda9d7SSteve Wise 414cfdda9d7SSteve Wise static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp) 415cfdda9d7SSteve Wise { 416cfdda9d7SSteve Wise struct cpl_close_con_req *req; 417cfdda9d7SSteve Wise struct sk_buff *skb; 418cfdda9d7SSteve Wise int wrlen = roundup(sizeof *req, 16); 419cfdda9d7SSteve Wise 420cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 421cfdda9d7SSteve Wise skb = get_skb(NULL, wrlen, gfp); 422cfdda9d7SSteve Wise if (!skb) { 423cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - failed to alloc skb\n", 
__func__); 424cfdda9d7SSteve Wise return -ENOMEM; 425cfdda9d7SSteve Wise } 426cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 427cfdda9d7SSteve Wise t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 428cfdda9d7SSteve Wise req = (struct cpl_close_con_req *) skb_put(skb, wrlen); 429cfdda9d7SSteve Wise memset(req, 0, wrlen); 430cfdda9d7SSteve Wise INIT_TP_WR(req, ep->hwtid); 431cfdda9d7SSteve Wise OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, 432cfdda9d7SSteve Wise ep->hwtid)); 433cfdda9d7SSteve Wise return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 434cfdda9d7SSteve Wise } 435cfdda9d7SSteve Wise 436cfdda9d7SSteve Wise static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) 437cfdda9d7SSteve Wise { 438cfdda9d7SSteve Wise struct cpl_abort_req *req; 439cfdda9d7SSteve Wise int wrlen = roundup(sizeof *req, 16); 440cfdda9d7SSteve Wise 441cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 442cfdda9d7SSteve Wise skb = get_skb(skb, wrlen, gfp); 443cfdda9d7SSteve Wise if (!skb) { 444cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 445cfdda9d7SSteve Wise __func__); 446cfdda9d7SSteve Wise return -ENOMEM; 447cfdda9d7SSteve Wise } 448cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 449cfdda9d7SSteve Wise t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure); 450cfdda9d7SSteve Wise req = (struct cpl_abort_req *) skb_put(skb, wrlen); 451cfdda9d7SSteve Wise memset(req, 0, wrlen); 452cfdda9d7SSteve Wise INIT_TP_WR(req, ep->hwtid); 453cfdda9d7SSteve Wise OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); 454cfdda9d7SSteve Wise req->cmd = CPL_ABORT_SEND_RST; 455cfdda9d7SSteve Wise return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 456cfdda9d7SSteve Wise } 457cfdda9d7SSteve Wise 458cfdda9d7SSteve Wise static int send_connect(struct c4iw_ep *ep) 459cfdda9d7SSteve Wise { 460cfdda9d7SSteve Wise struct cpl_act_open_req *req; 
461cfdda9d7SSteve Wise struct sk_buff *skb; 462cfdda9d7SSteve Wise u64 opt0; 463cfdda9d7SSteve Wise u32 opt2; 464cfdda9d7SSteve Wise unsigned int mtu_idx; 465cfdda9d7SSteve Wise int wscale; 466cfdda9d7SSteve Wise int wrlen = roundup(sizeof *req, 16); 467cfdda9d7SSteve Wise 468cfdda9d7SSteve Wise PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid); 469cfdda9d7SSteve Wise 470cfdda9d7SSteve Wise skb = get_skb(NULL, wrlen, GFP_KERNEL); 471cfdda9d7SSteve Wise if (!skb) { 472cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 473cfdda9d7SSteve Wise __func__); 474cfdda9d7SSteve Wise return -ENOMEM; 475cfdda9d7SSteve Wise } 476d4f1a5c6SSteve Wise set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 477cfdda9d7SSteve Wise 478cfdda9d7SSteve Wise cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 479cfdda9d7SSteve Wise wscale = compute_wscale(rcv_win); 480cfdda9d7SSteve Wise opt0 = KEEP_ALIVE(1) | 481ba6d3925SSteve Wise DELACK(1) | 482cfdda9d7SSteve Wise WND_SCALE(wscale) | 483cfdda9d7SSteve Wise MSS_IDX(mtu_idx) | 484cfdda9d7SSteve Wise L2T_IDX(ep->l2t->idx) | 485cfdda9d7SSteve Wise TX_CHAN(ep->tx_chan) | 486cfdda9d7SSteve Wise SMAC_SEL(ep->smac_idx) | 487cfdda9d7SSteve Wise DSCP(ep->tos) | 488cfdda9d7SSteve Wise RCV_BUFSIZ(rcv_win>>10); 489cfdda9d7SSteve Wise opt2 = RX_CHANNEL(0) | 490cfdda9d7SSteve Wise RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 491cfdda9d7SSteve Wise if (enable_tcp_timestamps) 492cfdda9d7SSteve Wise opt2 |= TSTAMPS_EN(1); 493cfdda9d7SSteve Wise if (enable_tcp_sack) 494cfdda9d7SSteve Wise opt2 |= SACK_EN(1); 495cfdda9d7SSteve Wise if (wscale && enable_tcp_window_scaling) 496cfdda9d7SSteve Wise opt2 |= WND_SCALE_EN(1); 497cfdda9d7SSteve Wise t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); 498cfdda9d7SSteve Wise 499cfdda9d7SSteve Wise req = (struct cpl_act_open_req *) skb_put(skb, wrlen); 500cfdda9d7SSteve Wise INIT_TP_WR(req, 0); 501cfdda9d7SSteve Wise OPCODE_TID(req) = cpu_to_be32( 502cfdda9d7SSteve Wise 
MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid))); 503cfdda9d7SSteve Wise req->local_port = ep->com.local_addr.sin_port; 504cfdda9d7SSteve Wise req->peer_port = ep->com.remote_addr.sin_port; 505cfdda9d7SSteve Wise req->local_ip = ep->com.local_addr.sin_addr.s_addr; 506cfdda9d7SSteve Wise req->peer_ip = ep->com.remote_addr.sin_addr.s_addr; 507cfdda9d7SSteve Wise req->opt0 = cpu_to_be64(opt0); 508cfdda9d7SSteve Wise req->params = 0; 509cfdda9d7SSteve Wise req->opt2 = cpu_to_be32(opt2); 510cfdda9d7SSteve Wise return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 511cfdda9d7SSteve Wise } 512cfdda9d7SSteve Wise 513cfdda9d7SSteve Wise static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb) 514cfdda9d7SSteve Wise { 515cfdda9d7SSteve Wise int mpalen, wrlen; 516cfdda9d7SSteve Wise struct fw_ofld_tx_data_wr *req; 517cfdda9d7SSteve Wise struct mpa_message *mpa; 518cfdda9d7SSteve Wise 519cfdda9d7SSteve Wise PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 520cfdda9d7SSteve Wise 521cfdda9d7SSteve Wise BUG_ON(skb_cloned(skb)); 522cfdda9d7SSteve Wise 523cfdda9d7SSteve Wise mpalen = sizeof(*mpa) + ep->plen; 524cfdda9d7SSteve Wise wrlen = roundup(mpalen + sizeof *req, 16); 525cfdda9d7SSteve Wise skb = get_skb(skb, wrlen, GFP_KERNEL); 526cfdda9d7SSteve Wise if (!skb) { 527cfdda9d7SSteve Wise connect_reply_upcall(ep, -ENOMEM); 528cfdda9d7SSteve Wise return; 529cfdda9d7SSteve Wise } 530cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 531cfdda9d7SSteve Wise 532cfdda9d7SSteve Wise req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); 533cfdda9d7SSteve Wise memset(req, 0, wrlen); 534cfdda9d7SSteve Wise req->op_to_immdlen = cpu_to_be32( 535cfdda9d7SSteve Wise FW_WR_OP(FW_OFLD_TX_DATA_WR) | 536cfdda9d7SSteve Wise FW_WR_COMPL(1) | 537cfdda9d7SSteve Wise FW_WR_IMMDLEN(mpalen)); 538cfdda9d7SSteve Wise req->flowid_len16 = cpu_to_be32( 539cfdda9d7SSteve Wise FW_WR_FLOWID(ep->hwtid) | 540cfdda9d7SSteve Wise FW_WR_LEN16(wrlen 
>> 4)); 541cfdda9d7SSteve Wise req->plen = cpu_to_be32(mpalen); 542cfdda9d7SSteve Wise req->tunnel_to_proxy = cpu_to_be32( 543cfdda9d7SSteve Wise FW_OFLD_TX_DATA_WR_FLUSH(1) | 544cfdda9d7SSteve Wise FW_OFLD_TX_DATA_WR_SHOVE(1)); 545cfdda9d7SSteve Wise 546cfdda9d7SSteve Wise mpa = (struct mpa_message *)(req + 1); 547cfdda9d7SSteve Wise memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); 548cfdda9d7SSteve Wise mpa->flags = (crc_enabled ? MPA_CRC : 0) | 549cfdda9d7SSteve Wise (markers_enabled ? MPA_MARKERS : 0); 550cfdda9d7SSteve Wise mpa->private_data_size = htons(ep->plen); 551cfdda9d7SSteve Wise mpa->revision = mpa_rev; 552cfdda9d7SSteve Wise 553cfdda9d7SSteve Wise if (ep->plen) 554cfdda9d7SSteve Wise memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen); 555cfdda9d7SSteve Wise 556cfdda9d7SSteve Wise /* 557cfdda9d7SSteve Wise * Reference the mpa skb. This ensures the data area 558cfdda9d7SSteve Wise * will remain in memory until the hw acks the tx. 559cfdda9d7SSteve Wise * Function fw4_ack() will deref it. 
560cfdda9d7SSteve Wise */ 561cfdda9d7SSteve Wise skb_get(skb); 562cfdda9d7SSteve Wise t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 563cfdda9d7SSteve Wise BUG_ON(ep->mpa_skb); 564cfdda9d7SSteve Wise ep->mpa_skb = skb; 565cfdda9d7SSteve Wise c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 566cfdda9d7SSteve Wise start_ep_timer(ep); 567cfdda9d7SSteve Wise state_set(&ep->com, MPA_REQ_SENT); 568cfdda9d7SSteve Wise ep->mpa_attr.initiator = 1; 569cfdda9d7SSteve Wise return; 570cfdda9d7SSteve Wise } 571cfdda9d7SSteve Wise 572cfdda9d7SSteve Wise static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) 573cfdda9d7SSteve Wise { 574cfdda9d7SSteve Wise int mpalen, wrlen; 575cfdda9d7SSteve Wise struct fw_ofld_tx_data_wr *req; 576cfdda9d7SSteve Wise struct mpa_message *mpa; 577cfdda9d7SSteve Wise struct sk_buff *skb; 578cfdda9d7SSteve Wise 579cfdda9d7SSteve Wise PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 580cfdda9d7SSteve Wise 581cfdda9d7SSteve Wise mpalen = sizeof(*mpa) + plen; 582cfdda9d7SSteve Wise wrlen = roundup(mpalen + sizeof *req, 16); 583cfdda9d7SSteve Wise 584cfdda9d7SSteve Wise skb = get_skb(NULL, wrlen, GFP_KERNEL); 585cfdda9d7SSteve Wise if (!skb) { 586cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); 587cfdda9d7SSteve Wise return -ENOMEM; 588cfdda9d7SSteve Wise } 589cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 590cfdda9d7SSteve Wise 591cfdda9d7SSteve Wise req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); 592cfdda9d7SSteve Wise memset(req, 0, wrlen); 593cfdda9d7SSteve Wise req->op_to_immdlen = cpu_to_be32( 594cfdda9d7SSteve Wise FW_WR_OP(FW_OFLD_TX_DATA_WR) | 595cfdda9d7SSteve Wise FW_WR_COMPL(1) | 596cfdda9d7SSteve Wise FW_WR_IMMDLEN(mpalen)); 597cfdda9d7SSteve Wise req->flowid_len16 = cpu_to_be32( 598cfdda9d7SSteve Wise FW_WR_FLOWID(ep->hwtid) | 599cfdda9d7SSteve Wise FW_WR_LEN16(wrlen >> 4)); 600cfdda9d7SSteve Wise req->plen = cpu_to_be32(mpalen); 
601cfdda9d7SSteve Wise req->tunnel_to_proxy = cpu_to_be32( 602cfdda9d7SSteve Wise FW_OFLD_TX_DATA_WR_FLUSH(1) | 603cfdda9d7SSteve Wise FW_OFLD_TX_DATA_WR_SHOVE(1)); 604cfdda9d7SSteve Wise 605cfdda9d7SSteve Wise mpa = (struct mpa_message *)(req + 1); 606cfdda9d7SSteve Wise memset(mpa, 0, sizeof(*mpa)); 607cfdda9d7SSteve Wise memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 608cfdda9d7SSteve Wise mpa->flags = MPA_REJECT; 609cfdda9d7SSteve Wise mpa->revision = mpa_rev; 610cfdda9d7SSteve Wise mpa->private_data_size = htons(plen); 611cfdda9d7SSteve Wise if (plen) 612cfdda9d7SSteve Wise memcpy(mpa->private_data, pdata, plen); 613cfdda9d7SSteve Wise 614cfdda9d7SSteve Wise /* 615cfdda9d7SSteve Wise * Reference the mpa skb again. This ensures the data area 616cfdda9d7SSteve Wise * will remain in memory until the hw acks the tx. 617cfdda9d7SSteve Wise * Function fw4_ack() will deref it. 618cfdda9d7SSteve Wise */ 619cfdda9d7SSteve Wise skb_get(skb); 620cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 621cfdda9d7SSteve Wise t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 622cfdda9d7SSteve Wise BUG_ON(ep->mpa_skb); 623cfdda9d7SSteve Wise ep->mpa_skb = skb; 624cfdda9d7SSteve Wise return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 625cfdda9d7SSteve Wise } 626cfdda9d7SSteve Wise 627cfdda9d7SSteve Wise static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) 628cfdda9d7SSteve Wise { 629cfdda9d7SSteve Wise int mpalen, wrlen; 630cfdda9d7SSteve Wise struct fw_ofld_tx_data_wr *req; 631cfdda9d7SSteve Wise struct mpa_message *mpa; 632cfdda9d7SSteve Wise struct sk_buff *skb; 633cfdda9d7SSteve Wise 634cfdda9d7SSteve Wise PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 635cfdda9d7SSteve Wise 636cfdda9d7SSteve Wise mpalen = sizeof(*mpa) + plen; 637cfdda9d7SSteve Wise wrlen = roundup(mpalen + sizeof *req, 16); 638cfdda9d7SSteve Wise 639cfdda9d7SSteve Wise skb = get_skb(NULL, wrlen, GFP_KERNEL); 640cfdda9d7SSteve Wise if 
(!skb) { 641cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); 642cfdda9d7SSteve Wise return -ENOMEM; 643cfdda9d7SSteve Wise } 644cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 645cfdda9d7SSteve Wise 646cfdda9d7SSteve Wise req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen); 647cfdda9d7SSteve Wise memset(req, 0, wrlen); 648cfdda9d7SSteve Wise req->op_to_immdlen = cpu_to_be32( 649cfdda9d7SSteve Wise FW_WR_OP(FW_OFLD_TX_DATA_WR) | 650cfdda9d7SSteve Wise FW_WR_COMPL(1) | 651cfdda9d7SSteve Wise FW_WR_IMMDLEN(mpalen)); 652cfdda9d7SSteve Wise req->flowid_len16 = cpu_to_be32( 653cfdda9d7SSteve Wise FW_WR_FLOWID(ep->hwtid) | 654cfdda9d7SSteve Wise FW_WR_LEN16(wrlen >> 4)); 655cfdda9d7SSteve Wise req->plen = cpu_to_be32(mpalen); 656cfdda9d7SSteve Wise req->tunnel_to_proxy = cpu_to_be32( 657cfdda9d7SSteve Wise FW_OFLD_TX_DATA_WR_FLUSH(1) | 658cfdda9d7SSteve Wise FW_OFLD_TX_DATA_WR_SHOVE(1)); 659cfdda9d7SSteve Wise 660cfdda9d7SSteve Wise mpa = (struct mpa_message *)(req + 1); 661cfdda9d7SSteve Wise memset(mpa, 0, sizeof(*mpa)); 662cfdda9d7SSteve Wise memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 663cfdda9d7SSteve Wise mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | 664cfdda9d7SSteve Wise (markers_enabled ? MPA_MARKERS : 0); 665cfdda9d7SSteve Wise mpa->revision = mpa_rev; 666cfdda9d7SSteve Wise mpa->private_data_size = htons(plen); 667cfdda9d7SSteve Wise if (plen) 668cfdda9d7SSteve Wise memcpy(mpa->private_data, pdata, plen); 669cfdda9d7SSteve Wise 670cfdda9d7SSteve Wise /* 671cfdda9d7SSteve Wise * Reference the mpa skb. This ensures the data area 672cfdda9d7SSteve Wise * will remain in memory until the hw acks the tx. 673cfdda9d7SSteve Wise * Function fw4_ack() will deref it. 
674cfdda9d7SSteve Wise */ 675cfdda9d7SSteve Wise skb_get(skb); 676cfdda9d7SSteve Wise t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 677cfdda9d7SSteve Wise ep->mpa_skb = skb; 678cfdda9d7SSteve Wise state_set(&ep->com, MPA_REP_SENT); 679cfdda9d7SSteve Wise return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 680cfdda9d7SSteve Wise } 681cfdda9d7SSteve Wise 682cfdda9d7SSteve Wise static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) 683cfdda9d7SSteve Wise { 684cfdda9d7SSteve Wise struct c4iw_ep *ep; 685cfdda9d7SSteve Wise struct cpl_act_establish *req = cplhdr(skb); 686cfdda9d7SSteve Wise unsigned int tid = GET_TID(req); 687cfdda9d7SSteve Wise unsigned int atid = GET_TID_TID(ntohl(req->tos_atid)); 688cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 689cfdda9d7SSteve Wise 690cfdda9d7SSteve Wise ep = lookup_atid(t, atid); 691cfdda9d7SSteve Wise 692cfdda9d7SSteve Wise PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid, 693cfdda9d7SSteve Wise be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); 694cfdda9d7SSteve Wise 695cfdda9d7SSteve Wise dst_confirm(ep->dst); 696cfdda9d7SSteve Wise 697cfdda9d7SSteve Wise /* setup the hwtid for this connection */ 698cfdda9d7SSteve Wise ep->hwtid = tid; 699cfdda9d7SSteve Wise cxgb4_insert_tid(t, ep, tid); 700cfdda9d7SSteve Wise 701cfdda9d7SSteve Wise ep->snd_seq = be32_to_cpu(req->snd_isn); 702cfdda9d7SSteve Wise ep->rcv_seq = be32_to_cpu(req->rcv_isn); 703cfdda9d7SSteve Wise 704cfdda9d7SSteve Wise set_emss(ep, ntohs(req->tcp_opt)); 705cfdda9d7SSteve Wise 706cfdda9d7SSteve Wise /* dealloc the atid */ 707cfdda9d7SSteve Wise cxgb4_free_atid(t, atid); 708cfdda9d7SSteve Wise 709cfdda9d7SSteve Wise /* start MPA negotiation */ 710cfdda9d7SSteve Wise send_flowc(ep, NULL); 711cfdda9d7SSteve Wise send_mpa_req(ep, skb); 712cfdda9d7SSteve Wise 713cfdda9d7SSteve Wise return 0; 714cfdda9d7SSteve Wise } 715cfdda9d7SSteve Wise 716cfdda9d7SSteve Wise static void close_complete_upcall(struct c4iw_ep 
*ep) 717cfdda9d7SSteve Wise { 718cfdda9d7SSteve Wise struct iw_cm_event event; 719cfdda9d7SSteve Wise 720cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 721cfdda9d7SSteve Wise memset(&event, 0, sizeof(event)); 722cfdda9d7SSteve Wise event.event = IW_CM_EVENT_CLOSE; 723cfdda9d7SSteve Wise if (ep->com.cm_id) { 724cfdda9d7SSteve Wise PDBG("close complete delivered ep %p cm_id %p tid %u\n", 725cfdda9d7SSteve Wise ep, ep->com.cm_id, ep->hwtid); 726cfdda9d7SSteve Wise ep->com.cm_id->event_handler(ep->com.cm_id, &event); 727cfdda9d7SSteve Wise ep->com.cm_id->rem_ref(ep->com.cm_id); 728cfdda9d7SSteve Wise ep->com.cm_id = NULL; 729cfdda9d7SSteve Wise ep->com.qp = NULL; 730cfdda9d7SSteve Wise } 731cfdda9d7SSteve Wise } 732cfdda9d7SSteve Wise 733cfdda9d7SSteve Wise static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) 734cfdda9d7SSteve Wise { 735cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 736cfdda9d7SSteve Wise close_complete_upcall(ep); 737cfdda9d7SSteve Wise state_set(&ep->com, ABORTING); 738cfdda9d7SSteve Wise return send_abort(ep, skb, gfp); 739cfdda9d7SSteve Wise } 740cfdda9d7SSteve Wise 741cfdda9d7SSteve Wise static void peer_close_upcall(struct c4iw_ep *ep) 742cfdda9d7SSteve Wise { 743cfdda9d7SSteve Wise struct iw_cm_event event; 744cfdda9d7SSteve Wise 745cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 746cfdda9d7SSteve Wise memset(&event, 0, sizeof(event)); 747cfdda9d7SSteve Wise event.event = IW_CM_EVENT_DISCONNECT; 748cfdda9d7SSteve Wise if (ep->com.cm_id) { 749cfdda9d7SSteve Wise PDBG("peer close delivered ep %p cm_id %p tid %u\n", 750cfdda9d7SSteve Wise ep, ep->com.cm_id, ep->hwtid); 751cfdda9d7SSteve Wise ep->com.cm_id->event_handler(ep->com.cm_id, &event); 752cfdda9d7SSteve Wise } 753cfdda9d7SSteve Wise } 754cfdda9d7SSteve Wise 755cfdda9d7SSteve Wise static void peer_abort_upcall(struct c4iw_ep *ep) 756cfdda9d7SSteve Wise { 757cfdda9d7SSteve Wise struct 
/*
 * Report the outcome of an active connect to the ULP
 * (IW_CM_EVENT_CONNECT_REPLY).  On success or an MPA-level refusal the
 * peer's private data (accumulated in ep->mpa_pkt) is handed up; on any
 * failure (status < 0) the cm_id reference is dropped and cm_id/qp are
 * detached from the endpoint.
 */
static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event = {
		.event       = IW_CM_EVENT_CONNECT_REPLY,
		.status      = status,
		.local_addr  = ep->com.local_addr,
		.remote_addr = ep->com.remote_addr,
	};

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);

	if (status == 0 || status == -ECONNREFUSED) {
		/* private data follows the MPA header in the pkt buffer */
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}
ep->com.qp = NULL; 797cfdda9d7SSteve Wise } 798cfdda9d7SSteve Wise } 799cfdda9d7SSteve Wise 800cfdda9d7SSteve Wise static void connect_request_upcall(struct c4iw_ep *ep) 801cfdda9d7SSteve Wise { 802cfdda9d7SSteve Wise struct iw_cm_event event; 803cfdda9d7SSteve Wise 804cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 805cfdda9d7SSteve Wise memset(&event, 0, sizeof(event)); 806cfdda9d7SSteve Wise event.event = IW_CM_EVENT_CONNECT_REQUEST; 807cfdda9d7SSteve Wise event.local_addr = ep->com.local_addr; 808cfdda9d7SSteve Wise event.remote_addr = ep->com.remote_addr; 809cfdda9d7SSteve Wise event.private_data_len = ep->plen; 810cfdda9d7SSteve Wise event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); 811cfdda9d7SSteve Wise event.provider_data = ep; 812cfdda9d7SSteve Wise if (state_read(&ep->parent_ep->com) != DEAD) { 813cfdda9d7SSteve Wise c4iw_get_ep(&ep->com); 814cfdda9d7SSteve Wise ep->parent_ep->com.cm_id->event_handler( 815cfdda9d7SSteve Wise ep->parent_ep->com.cm_id, 816cfdda9d7SSteve Wise &event); 817cfdda9d7SSteve Wise } 818cfdda9d7SSteve Wise c4iw_put_ep(&ep->parent_ep->com); 819cfdda9d7SSteve Wise ep->parent_ep = NULL; 820cfdda9d7SSteve Wise } 821cfdda9d7SSteve Wise 822cfdda9d7SSteve Wise static void established_upcall(struct c4iw_ep *ep) 823cfdda9d7SSteve Wise { 824cfdda9d7SSteve Wise struct iw_cm_event event; 825cfdda9d7SSteve Wise 826cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 827cfdda9d7SSteve Wise memset(&event, 0, sizeof(event)); 828cfdda9d7SSteve Wise event.event = IW_CM_EVENT_ESTABLISHED; 829cfdda9d7SSteve Wise if (ep->com.cm_id) { 830cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 831cfdda9d7SSteve Wise ep->com.cm_id->event_handler(ep->com.cm_id, &event); 832cfdda9d7SSteve Wise } 833cfdda9d7SSteve Wise } 834cfdda9d7SSteve Wise 835cfdda9d7SSteve Wise static int update_rx_credits(struct c4iw_ep *ep, u32 credits) 836cfdda9d7SSteve Wise { 837cfdda9d7SSteve Wise struct 
/*
 * Process streaming-mode bytes received while in MPA_REQ_SENT (active
 * side waiting for the peer's MPA start reply).  Bytes are accumulated
 * in ep->mpa_pkt across calls; once a complete, valid reply (header +
 * private data) is present the endpoint moves to FPDU_MODE and the QP
 * is transitioned to RTS.  Any protocol violation or a peer reject
 * aborts the connection; either way the ULP gets a connect-reply
 * upcall with the resulting status.
 */
static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct mpa_message *mpa;
	u16 plen;
	int ret;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer.  If it expired, then the state has changed and
	 * we bail since ep_timeout already aborted the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/* More private data than we can buffer ends the connection. */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		ret = -EINVAL;
		goto fail;
	}

	/* Append the new bytes to the accumulation buffer. */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/* Need at least a full MPA header before anything can be checked. */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		ret = -EPROTO;
		goto fail;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		ret = -EPROTO;
		goto fail;
	}

	plen = ntohs(mpa->private_data_size);

	/* Reject over-long private data. */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		ret = -EPROTO;
		goto fail;
	}

	/* plen must account for everything received so far. */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		ret = -EPROTO;
		goto fail;
	}

	ep->plen = (u8) plen;

	/*
	 * Not all the private data has arrived yet: keep accumulating,
	 * processing continues on the next CPL_RX_DATA.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		ret = -ECONNREFUSED;
		goto fail;
	}

	/*
	 * The entire valid MPA start reply, including private data, has
	 * been accumulated.  Latch the negotiated attributes (CRC is on
	 * if the peer asked for it or the module parameter forces it)
	 * and move the QP to RTS.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled =
		((mpa->flags & MPA_CRC) || crc_enabled) ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
					    FW_RI_INIT_P2PTYPE_DISABLED;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	ret = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (ret)
		goto fail;
	goto out;
fail:
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, ret);
}
p2p_type : 956cfdda9d7SSteve Wise FW_RI_INIT_P2PTYPE_DISABLED; 957cfdda9d7SSteve Wise PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " 958cfdda9d7SSteve Wise "xmit_marker_enabled=%d, version=%d\n", __func__, 959cfdda9d7SSteve Wise ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 960cfdda9d7SSteve Wise ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version); 961cfdda9d7SSteve Wise 962cfdda9d7SSteve Wise attrs.mpa_attr = ep->mpa_attr; 963cfdda9d7SSteve Wise attrs.max_ird = ep->ird; 964cfdda9d7SSteve Wise attrs.max_ord = ep->ord; 965cfdda9d7SSteve Wise attrs.llp_stream_handle = ep; 966cfdda9d7SSteve Wise attrs.next_state = C4IW_QP_STATE_RTS; 967cfdda9d7SSteve Wise 968cfdda9d7SSteve Wise mask = C4IW_QP_ATTR_NEXT_STATE | 969cfdda9d7SSteve Wise C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | 970cfdda9d7SSteve Wise C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; 971cfdda9d7SSteve Wise 972cfdda9d7SSteve Wise /* bind QP and TID with INIT_WR */ 973cfdda9d7SSteve Wise err = c4iw_modify_qp(ep->com.qp->rhp, 974cfdda9d7SSteve Wise ep->com.qp, mask, &attrs, 1); 975cfdda9d7SSteve Wise if (err) 976cfdda9d7SSteve Wise goto err; 977cfdda9d7SSteve Wise goto out; 978cfdda9d7SSteve Wise err: 979b21ef16aSSteve Wise state_set(&ep->com, ABORTING); 980b21ef16aSSteve Wise send_abort(ep, skb, GFP_KERNEL); 981cfdda9d7SSteve Wise out: 982cfdda9d7SSteve Wise connect_reply_upcall(ep, err); 983cfdda9d7SSteve Wise return; 984cfdda9d7SSteve Wise } 985cfdda9d7SSteve Wise 986cfdda9d7SSteve Wise static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) 987cfdda9d7SSteve Wise { 988cfdda9d7SSteve Wise struct mpa_message *mpa; 989cfdda9d7SSteve Wise u16 plen; 990cfdda9d7SSteve Wise 991cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 992cfdda9d7SSteve Wise 993cfdda9d7SSteve Wise if (state_read(&ep->com) != MPA_REQ_WAIT) 994cfdda9d7SSteve Wise return; 995cfdda9d7SSteve Wise 996cfdda9d7SSteve Wise /* 997cfdda9d7SSteve Wise * If we get more than the 
supported amount of private data 998cfdda9d7SSteve Wise * then we must fail this connection. 999cfdda9d7SSteve Wise */ 1000cfdda9d7SSteve Wise if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { 1001cfdda9d7SSteve Wise stop_ep_timer(ep); 1002cfdda9d7SSteve Wise abort_connection(ep, skb, GFP_KERNEL); 1003cfdda9d7SSteve Wise return; 1004cfdda9d7SSteve Wise } 1005cfdda9d7SSteve Wise 1006cfdda9d7SSteve Wise PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); 1007cfdda9d7SSteve Wise 1008cfdda9d7SSteve Wise /* 1009cfdda9d7SSteve Wise * Copy the new data into our accumulation buffer. 1010cfdda9d7SSteve Wise */ 1011cfdda9d7SSteve Wise skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 1012cfdda9d7SSteve Wise skb->len); 1013cfdda9d7SSteve Wise ep->mpa_pkt_len += skb->len; 1014cfdda9d7SSteve Wise 1015cfdda9d7SSteve Wise /* 1016cfdda9d7SSteve Wise * If we don't even have the mpa message, then bail. 1017cfdda9d7SSteve Wise * We'll continue process when more data arrives. 1018cfdda9d7SSteve Wise */ 1019cfdda9d7SSteve Wise if (ep->mpa_pkt_len < sizeof(*mpa)) 1020cfdda9d7SSteve Wise return; 1021cfdda9d7SSteve Wise 1022cfdda9d7SSteve Wise PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); 1023cfdda9d7SSteve Wise stop_ep_timer(ep); 1024cfdda9d7SSteve Wise mpa = (struct mpa_message *) ep->mpa_pkt; 1025cfdda9d7SSteve Wise 1026cfdda9d7SSteve Wise /* 1027cfdda9d7SSteve Wise * Validate MPA Header. 
1028cfdda9d7SSteve Wise */ 1029cfdda9d7SSteve Wise if (mpa->revision != mpa_rev) { 1030cfdda9d7SSteve Wise abort_connection(ep, skb, GFP_KERNEL); 1031cfdda9d7SSteve Wise return; 1032cfdda9d7SSteve Wise } 1033cfdda9d7SSteve Wise 1034cfdda9d7SSteve Wise if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) { 1035cfdda9d7SSteve Wise abort_connection(ep, skb, GFP_KERNEL); 1036cfdda9d7SSteve Wise return; 1037cfdda9d7SSteve Wise } 1038cfdda9d7SSteve Wise 1039cfdda9d7SSteve Wise plen = ntohs(mpa->private_data_size); 1040cfdda9d7SSteve Wise 1041cfdda9d7SSteve Wise /* 1042cfdda9d7SSteve Wise * Fail if there's too much private data. 1043cfdda9d7SSteve Wise */ 1044cfdda9d7SSteve Wise if (plen > MPA_MAX_PRIVATE_DATA) { 1045cfdda9d7SSteve Wise abort_connection(ep, skb, GFP_KERNEL); 1046cfdda9d7SSteve Wise return; 1047cfdda9d7SSteve Wise } 1048cfdda9d7SSteve Wise 1049cfdda9d7SSteve Wise /* 1050cfdda9d7SSteve Wise * If plen does not account for pkt size 1051cfdda9d7SSteve Wise */ 1052cfdda9d7SSteve Wise if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 1053cfdda9d7SSteve Wise abort_connection(ep, skb, GFP_KERNEL); 1054cfdda9d7SSteve Wise return; 1055cfdda9d7SSteve Wise } 1056cfdda9d7SSteve Wise ep->plen = (u8) plen; 1057cfdda9d7SSteve Wise 1058cfdda9d7SSteve Wise /* 1059cfdda9d7SSteve Wise * If we don't have all the pdata yet, then bail. 1060cfdda9d7SSteve Wise */ 1061cfdda9d7SSteve Wise if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 1062cfdda9d7SSteve Wise return; 1063cfdda9d7SSteve Wise 1064cfdda9d7SSteve Wise /* 1065cfdda9d7SSteve Wise * If we get here we have accumulated the entire mpa 1066cfdda9d7SSteve Wise * start reply message including private data. 1067cfdda9d7SSteve Wise */ 1068cfdda9d7SSteve Wise ep->mpa_attr.initiator = 0; 1069cfdda9d7SSteve Wise ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 
1 : 0; 1070cfdda9d7SSteve Wise ep->mpa_attr.recv_marker_enabled = markers_enabled; 1071cfdda9d7SSteve Wise ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1072cfdda9d7SSteve Wise ep->mpa_attr.version = mpa_rev; 1073cfdda9d7SSteve Wise ep->mpa_attr.p2p_type = peer2peer ? p2p_type : 1074cfdda9d7SSteve Wise FW_RI_INIT_P2PTYPE_DISABLED; 1075cfdda9d7SSteve Wise PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " 1076cfdda9d7SSteve Wise "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__, 1077cfdda9d7SSteve Wise ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 1078cfdda9d7SSteve Wise ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 1079cfdda9d7SSteve Wise ep->mpa_attr.p2p_type); 1080cfdda9d7SSteve Wise 1081cfdda9d7SSteve Wise state_set(&ep->com, MPA_REQ_RCVD); 1082cfdda9d7SSteve Wise 1083cfdda9d7SSteve Wise /* drive upcall */ 1084cfdda9d7SSteve Wise connect_request_upcall(ep); 1085cfdda9d7SSteve Wise return; 1086cfdda9d7SSteve Wise } 1087cfdda9d7SSteve Wise 1088cfdda9d7SSteve Wise static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) 1089cfdda9d7SSteve Wise { 1090cfdda9d7SSteve Wise struct c4iw_ep *ep; 1091cfdda9d7SSteve Wise struct cpl_rx_data *hdr = cplhdr(skb); 1092cfdda9d7SSteve Wise unsigned int dlen = ntohs(hdr->len); 1093cfdda9d7SSteve Wise unsigned int tid = GET_TID(hdr); 1094cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 1095cfdda9d7SSteve Wise 1096cfdda9d7SSteve Wise ep = lookup_tid(t, tid); 1097cfdda9d7SSteve Wise PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen); 1098cfdda9d7SSteve Wise skb_pull(skb, sizeof(*hdr)); 1099cfdda9d7SSteve Wise skb_trim(skb, dlen); 1100cfdda9d7SSteve Wise 1101cfdda9d7SSteve Wise ep->rcv_seq += dlen; 1102cfdda9d7SSteve Wise BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen)); 1103cfdda9d7SSteve Wise 1104cfdda9d7SSteve Wise /* update RX credits */ 1105cfdda9d7SSteve Wise update_rx_credits(ep, dlen); 1106cfdda9d7SSteve Wise 1107cfdda9d7SSteve Wise 
switch (state_read(&ep->com)) { 1108cfdda9d7SSteve Wise case MPA_REQ_SENT: 1109cfdda9d7SSteve Wise process_mpa_reply(ep, skb); 1110cfdda9d7SSteve Wise break; 1111cfdda9d7SSteve Wise case MPA_REQ_WAIT: 1112cfdda9d7SSteve Wise process_mpa_request(ep, skb); 1113cfdda9d7SSteve Wise break; 1114cfdda9d7SSteve Wise case MPA_REP_SENT: 1115cfdda9d7SSteve Wise break; 1116cfdda9d7SSteve Wise default: 1117cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s Unexpected streaming data." 1118cfdda9d7SSteve Wise " ep %p state %d tid %u\n", 1119cfdda9d7SSteve Wise __func__, ep, state_read(&ep->com), ep->hwtid); 1120cfdda9d7SSteve Wise 1121cfdda9d7SSteve Wise /* 1122cfdda9d7SSteve Wise * The ep will timeout and inform the ULP of the failure. 1123cfdda9d7SSteve Wise * See ep_timeout(). 1124cfdda9d7SSteve Wise */ 1125cfdda9d7SSteve Wise break; 1126cfdda9d7SSteve Wise } 1127cfdda9d7SSteve Wise return 0; 1128cfdda9d7SSteve Wise } 1129cfdda9d7SSteve Wise 1130cfdda9d7SSteve Wise static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1131cfdda9d7SSteve Wise { 1132cfdda9d7SSteve Wise struct c4iw_ep *ep; 1133cfdda9d7SSteve Wise struct cpl_abort_rpl_rss *rpl = cplhdr(skb); 1134cfdda9d7SSteve Wise unsigned long flags; 1135cfdda9d7SSteve Wise int release = 0; 1136cfdda9d7SSteve Wise unsigned int tid = GET_TID(rpl); 1137cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 1138cfdda9d7SSteve Wise 1139cfdda9d7SSteve Wise ep = lookup_tid(t, tid); 1140cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1141cfdda9d7SSteve Wise BUG_ON(!ep); 1142cfdda9d7SSteve Wise spin_lock_irqsave(&ep->com.lock, flags); 1143cfdda9d7SSteve Wise switch (ep->com.state) { 1144cfdda9d7SSteve Wise case ABORTING: 1145cfdda9d7SSteve Wise __state_set(&ep->com, DEAD); 1146cfdda9d7SSteve Wise release = 1; 1147cfdda9d7SSteve Wise break; 1148cfdda9d7SSteve Wise default: 1149cfdda9d7SSteve Wise printk(KERN_ERR "%s ep %p state %d\n", 1150cfdda9d7SSteve Wise __func__, ep, ep->com.state); 
1151cfdda9d7SSteve Wise break; 1152cfdda9d7SSteve Wise } 1153cfdda9d7SSteve Wise spin_unlock_irqrestore(&ep->com.lock, flags); 1154cfdda9d7SSteve Wise 1155cfdda9d7SSteve Wise if (release) 1156cfdda9d7SSteve Wise release_ep_resources(ep); 1157cfdda9d7SSteve Wise return 0; 1158cfdda9d7SSteve Wise } 1159cfdda9d7SSteve Wise 1160cfdda9d7SSteve Wise /* 1161cfdda9d7SSteve Wise * Return whether a failed active open has allocated a TID 1162cfdda9d7SSteve Wise */ 1163cfdda9d7SSteve Wise static inline int act_open_has_tid(int status) 1164cfdda9d7SSteve Wise { 1165cfdda9d7SSteve Wise return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && 1166cfdda9d7SSteve Wise status != CPL_ERR_ARP_MISS; 1167cfdda9d7SSteve Wise } 1168cfdda9d7SSteve Wise 1169cfdda9d7SSteve Wise static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1170cfdda9d7SSteve Wise { 1171cfdda9d7SSteve Wise struct c4iw_ep *ep; 1172cfdda9d7SSteve Wise struct cpl_act_open_rpl *rpl = cplhdr(skb); 1173cfdda9d7SSteve Wise unsigned int atid = GET_TID_TID(GET_AOPEN_ATID( 1174cfdda9d7SSteve Wise ntohl(rpl->atid_status))); 1175cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 1176cfdda9d7SSteve Wise int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status)); 1177cfdda9d7SSteve Wise 1178cfdda9d7SSteve Wise ep = lookup_atid(t, atid); 1179cfdda9d7SSteve Wise 1180cfdda9d7SSteve Wise PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, 1181cfdda9d7SSteve Wise status, status2errno(status)); 1182cfdda9d7SSteve Wise 1183cfdda9d7SSteve Wise if (status == CPL_ERR_RTX_NEG_ADVICE) { 1184cfdda9d7SSteve Wise printk(KERN_WARNING MOD "Connection problems for atid %u\n", 1185cfdda9d7SSteve Wise atid); 1186cfdda9d7SSteve Wise return 0; 1187cfdda9d7SSteve Wise } 1188cfdda9d7SSteve Wise 1189cfdda9d7SSteve Wise connect_reply_upcall(ep, status2errno(status)); 1190cfdda9d7SSteve Wise state_set(&ep->com, DEAD); 1191cfdda9d7SSteve Wise 1192cfdda9d7SSteve Wise if (status && act_open_has_tid(status)) 
1193cfdda9d7SSteve Wise cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); 1194cfdda9d7SSteve Wise 1195cfdda9d7SSteve Wise cxgb4_free_atid(t, atid); 1196cfdda9d7SSteve Wise dst_release(ep->dst); 1197cfdda9d7SSteve Wise cxgb4_l2t_release(ep->l2t); 1198cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 1199cfdda9d7SSteve Wise 1200cfdda9d7SSteve Wise return 0; 1201cfdda9d7SSteve Wise } 1202cfdda9d7SSteve Wise 1203cfdda9d7SSteve Wise static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1204cfdda9d7SSteve Wise { 1205cfdda9d7SSteve Wise struct cpl_pass_open_rpl *rpl = cplhdr(skb); 1206cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 1207cfdda9d7SSteve Wise unsigned int stid = GET_TID(rpl); 1208cfdda9d7SSteve Wise struct c4iw_listen_ep *ep = lookup_stid(t, stid); 1209cfdda9d7SSteve Wise 1210cfdda9d7SSteve Wise if (!ep) { 1211cfdda9d7SSteve Wise printk(KERN_ERR MOD "stid %d lookup failure!\n", stid); 1212cfdda9d7SSteve Wise return 0; 1213cfdda9d7SSteve Wise } 1214cfdda9d7SSteve Wise PDBG("%s ep %p status %d error %d\n", __func__, ep, 1215cfdda9d7SSteve Wise rpl->status, status2errno(rpl->status)); 1216cfdda9d7SSteve Wise ep->com.rpl_err = status2errno(rpl->status); 1217cfdda9d7SSteve Wise ep->com.rpl_done = 1; 1218cfdda9d7SSteve Wise wake_up(&ep->com.waitq); 1219cfdda9d7SSteve Wise 1220cfdda9d7SSteve Wise return 0; 1221cfdda9d7SSteve Wise } 1222cfdda9d7SSteve Wise 1223cfdda9d7SSteve Wise static int listen_stop(struct c4iw_listen_ep *ep) 1224cfdda9d7SSteve Wise { 1225cfdda9d7SSteve Wise struct sk_buff *skb; 1226cfdda9d7SSteve Wise struct cpl_close_listsvr_req *req; 1227cfdda9d7SSteve Wise 1228cfdda9d7SSteve Wise PDBG("%s ep %p\n", __func__, ep); 1229cfdda9d7SSteve Wise skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 1230cfdda9d7SSteve Wise if (!skb) { 1231cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); 1232cfdda9d7SSteve Wise return -ENOMEM; 1233cfdda9d7SSteve Wise } 1234cfdda9d7SSteve Wise req = (struct 
cpl_close_listsvr_req *) skb_put(skb, sizeof(*req)); 1235cfdda9d7SSteve Wise INIT_TP_WR(req, 0); 1236cfdda9d7SSteve Wise OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, 1237cfdda9d7SSteve Wise ep->stid)); 1238cfdda9d7SSteve Wise req->reply_ctrl = cpu_to_be16( 1239cfdda9d7SSteve Wise QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0])); 1240cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); 1241cfdda9d7SSteve Wise return c4iw_ofld_send(&ep->com.dev->rdev, skb); 1242cfdda9d7SSteve Wise } 1243cfdda9d7SSteve Wise 1244cfdda9d7SSteve Wise static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1245cfdda9d7SSteve Wise { 1246cfdda9d7SSteve Wise struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); 1247cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 1248cfdda9d7SSteve Wise unsigned int stid = GET_TID(rpl); 1249cfdda9d7SSteve Wise struct c4iw_listen_ep *ep = lookup_stid(t, stid); 1250cfdda9d7SSteve Wise 1251cfdda9d7SSteve Wise PDBG("%s ep %p\n", __func__, ep); 1252cfdda9d7SSteve Wise ep->com.rpl_err = status2errno(rpl->status); 1253cfdda9d7SSteve Wise ep->com.rpl_done = 1; 1254cfdda9d7SSteve Wise wake_up(&ep->com.waitq); 1255cfdda9d7SSteve Wise return 0; 1256cfdda9d7SSteve Wise } 1257cfdda9d7SSteve Wise 1258cfdda9d7SSteve Wise static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb, 1259cfdda9d7SSteve Wise struct cpl_pass_accept_req *req) 1260cfdda9d7SSteve Wise { 1261cfdda9d7SSteve Wise struct cpl_pass_accept_rpl *rpl; 1262cfdda9d7SSteve Wise unsigned int mtu_idx; 1263cfdda9d7SSteve Wise u64 opt0; 1264cfdda9d7SSteve Wise u32 opt2; 1265cfdda9d7SSteve Wise int wscale; 1266cfdda9d7SSteve Wise 1267cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1268cfdda9d7SSteve Wise BUG_ON(skb_cloned(skb)); 1269cfdda9d7SSteve Wise skb_trim(skb, sizeof(*rpl)); 1270cfdda9d7SSteve Wise skb_get(skb); 1271cfdda9d7SSteve Wise cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 1272cfdda9d7SSteve 
Wise wscale = compute_wscale(rcv_win); 1273cfdda9d7SSteve Wise opt0 = KEEP_ALIVE(1) | 1274ba6d3925SSteve Wise DELACK(1) | 1275cfdda9d7SSteve Wise WND_SCALE(wscale) | 1276cfdda9d7SSteve Wise MSS_IDX(mtu_idx) | 1277cfdda9d7SSteve Wise L2T_IDX(ep->l2t->idx) | 1278cfdda9d7SSteve Wise TX_CHAN(ep->tx_chan) | 1279cfdda9d7SSteve Wise SMAC_SEL(ep->smac_idx) | 1280cfdda9d7SSteve Wise DSCP(ep->tos) | 1281cfdda9d7SSteve Wise RCV_BUFSIZ(rcv_win>>10); 1282cfdda9d7SSteve Wise opt2 = RX_CHANNEL(0) | 1283cfdda9d7SSteve Wise RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 1284cfdda9d7SSteve Wise 1285cfdda9d7SSteve Wise if (enable_tcp_timestamps && req->tcpopt.tstamp) 1286cfdda9d7SSteve Wise opt2 |= TSTAMPS_EN(1); 1287cfdda9d7SSteve Wise if (enable_tcp_sack && req->tcpopt.sack) 1288cfdda9d7SSteve Wise opt2 |= SACK_EN(1); 1289cfdda9d7SSteve Wise if (wscale && enable_tcp_window_scaling) 1290cfdda9d7SSteve Wise opt2 |= WND_SCALE_EN(1); 1291cfdda9d7SSteve Wise 1292cfdda9d7SSteve Wise rpl = cplhdr(skb); 1293cfdda9d7SSteve Wise INIT_TP_WR(rpl, ep->hwtid); 1294cfdda9d7SSteve Wise OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, 1295cfdda9d7SSteve Wise ep->hwtid)); 1296cfdda9d7SSteve Wise rpl->opt0 = cpu_to_be64(opt0); 1297cfdda9d7SSteve Wise rpl->opt2 = cpu_to_be32(opt2); 1298d4f1a5c6SSteve Wise set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 1299cfdda9d7SSteve Wise c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1300cfdda9d7SSteve Wise 1301cfdda9d7SSteve Wise return; 1302cfdda9d7SSteve Wise } 1303cfdda9d7SSteve Wise 1304cfdda9d7SSteve Wise static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip, 1305cfdda9d7SSteve Wise struct sk_buff *skb) 1306cfdda9d7SSteve Wise { 1307cfdda9d7SSteve Wise PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid, 1308cfdda9d7SSteve Wise peer_ip); 1309cfdda9d7SSteve Wise BUG_ON(skb_cloned(skb)); 1310cfdda9d7SSteve Wise skb_trim(skb, sizeof(struct cpl_tid_release)); 1311cfdda9d7SSteve Wise skb_get(skb); 1312cfdda9d7SSteve 
Wise release_tid(&dev->rdev, hwtid, skb); 1313cfdda9d7SSteve Wise return; 1314cfdda9d7SSteve Wise } 1315cfdda9d7SSteve Wise 1316cfdda9d7SSteve Wise static void get_4tuple(struct cpl_pass_accept_req *req, 1317cfdda9d7SSteve Wise __be32 *local_ip, __be32 *peer_ip, 1318cfdda9d7SSteve Wise __be16 *local_port, __be16 *peer_port) 1319cfdda9d7SSteve Wise { 1320cfdda9d7SSteve Wise int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len)); 1321cfdda9d7SSteve Wise int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len)); 1322cfdda9d7SSteve Wise struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len); 1323cfdda9d7SSteve Wise struct tcphdr *tcp = (struct tcphdr *) 1324cfdda9d7SSteve Wise ((u8 *)(req + 1) + eth_len + ip_len); 1325cfdda9d7SSteve Wise 1326cfdda9d7SSteve Wise PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__, 1327cfdda9d7SSteve Wise ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source), 1328cfdda9d7SSteve Wise ntohs(tcp->dest)); 1329cfdda9d7SSteve Wise 1330cfdda9d7SSteve Wise *peer_ip = ip->saddr; 1331cfdda9d7SSteve Wise *local_ip = ip->daddr; 1332cfdda9d7SSteve Wise *peer_port = tcp->source; 1333cfdda9d7SSteve Wise *local_port = tcp->dest; 1334cfdda9d7SSteve Wise 1335cfdda9d7SSteve Wise return; 1336cfdda9d7SSteve Wise } 1337cfdda9d7SSteve Wise 1338cfdda9d7SSteve Wise static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) 1339cfdda9d7SSteve Wise { 1340cfdda9d7SSteve Wise struct c4iw_ep *child_ep, *parent_ep; 1341cfdda9d7SSteve Wise struct cpl_pass_accept_req *req = cplhdr(skb); 1342cfdda9d7SSteve Wise unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid)); 1343cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 1344cfdda9d7SSteve Wise unsigned int hwtid = GET_TID(req); 1345cfdda9d7SSteve Wise struct dst_entry *dst; 1346cfdda9d7SSteve Wise struct l2t_entry *l2t; 1347cfdda9d7SSteve Wise struct rtable *rt; 1348cfdda9d7SSteve Wise __be32 local_ip, peer_ip; 1349cfdda9d7SSteve Wise __be16 local_port, peer_port; 1350cfdda9d7SSteve 
Wise struct net_device *pdev; 1351cfdda9d7SSteve Wise u32 tx_chan, smac_idx; 1352cfdda9d7SSteve Wise u16 rss_qid; 1353cfdda9d7SSteve Wise u32 mtu; 1354cfdda9d7SSteve Wise int step; 1355d4f1a5c6SSteve Wise int txq_idx, ctrlq_idx; 1356cfdda9d7SSteve Wise 1357cfdda9d7SSteve Wise parent_ep = lookup_stid(t, stid); 1358cfdda9d7SSteve Wise PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid); 1359cfdda9d7SSteve Wise 1360cfdda9d7SSteve Wise get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port); 1361cfdda9d7SSteve Wise 1362cfdda9d7SSteve Wise if (state_read(&parent_ep->com) != LISTEN) { 1363cfdda9d7SSteve Wise printk(KERN_ERR "%s - listening ep not in LISTEN\n", 1364cfdda9d7SSteve Wise __func__); 1365cfdda9d7SSteve Wise goto reject; 1366cfdda9d7SSteve Wise } 1367cfdda9d7SSteve Wise 1368cfdda9d7SSteve Wise /* Find output route */ 1369cfdda9d7SSteve Wise rt = find_route(dev, local_ip, peer_ip, local_port, peer_port, 1370cfdda9d7SSteve Wise GET_POPEN_TOS(ntohl(req->tos_stid))); 1371cfdda9d7SSteve Wise if (!rt) { 1372cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - failed to find dst entry!\n", 1373cfdda9d7SSteve Wise __func__); 1374cfdda9d7SSteve Wise goto reject; 1375cfdda9d7SSteve Wise } 1376cfdda9d7SSteve Wise dst = &rt->u.dst; 1377cfdda9d7SSteve Wise if (dst->neighbour->dev->flags & IFF_LOOPBACK) { 1378cfdda9d7SSteve Wise pdev = ip_dev_find(&init_net, peer_ip); 1379cfdda9d7SSteve Wise BUG_ON(!pdev); 1380cfdda9d7SSteve Wise l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour, 1381cfdda9d7SSteve Wise pdev, 0); 1382cfdda9d7SSteve Wise mtu = pdev->mtu; 1383cfdda9d7SSteve Wise tx_chan = cxgb4_port_chan(pdev); 13842c5934bfSSteve Wise smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; 1385cfdda9d7SSteve Wise step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; 1386cfdda9d7SSteve Wise txq_idx = cxgb4_port_idx(pdev) * step; 1387d4f1a5c6SSteve Wise ctrlq_idx = cxgb4_port_idx(pdev); 1388cfdda9d7SSteve Wise step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; 
1389cfdda9d7SSteve Wise rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step]; 1390cfdda9d7SSteve Wise dev_put(pdev); 1391cfdda9d7SSteve Wise } else { 1392cfdda9d7SSteve Wise l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour, 1393cfdda9d7SSteve Wise dst->neighbour->dev, 0); 1394cfdda9d7SSteve Wise mtu = dst_mtu(dst); 1395cfdda9d7SSteve Wise tx_chan = cxgb4_port_chan(dst->neighbour->dev); 13962c5934bfSSteve Wise smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1; 1397cfdda9d7SSteve Wise step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; 1398cfdda9d7SSteve Wise txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step; 1399d4f1a5c6SSteve Wise ctrlq_idx = cxgb4_port_idx(dst->neighbour->dev); 1400cfdda9d7SSteve Wise step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; 1401cfdda9d7SSteve Wise rss_qid = dev->rdev.lldi.rxq_ids[ 1402cfdda9d7SSteve Wise cxgb4_port_idx(dst->neighbour->dev) * step]; 1403cfdda9d7SSteve Wise } 1404cfdda9d7SSteve Wise if (!l2t) { 1405cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", 1406cfdda9d7SSteve Wise __func__); 1407cfdda9d7SSteve Wise dst_release(dst); 1408cfdda9d7SSteve Wise goto reject; 1409cfdda9d7SSteve Wise } 1410cfdda9d7SSteve Wise 1411cfdda9d7SSteve Wise child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); 1412cfdda9d7SSteve Wise if (!child_ep) { 1413cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", 1414cfdda9d7SSteve Wise __func__); 1415cfdda9d7SSteve Wise cxgb4_l2t_release(l2t); 1416cfdda9d7SSteve Wise dst_release(dst); 1417cfdda9d7SSteve Wise goto reject; 1418cfdda9d7SSteve Wise } 1419cfdda9d7SSteve Wise state_set(&child_ep->com, CONNECTING); 1420cfdda9d7SSteve Wise child_ep->com.dev = dev; 1421cfdda9d7SSteve Wise child_ep->com.cm_id = NULL; 1422cfdda9d7SSteve Wise child_ep->com.local_addr.sin_family = PF_INET; 1423cfdda9d7SSteve Wise child_ep->com.local_addr.sin_port = local_port; 1424cfdda9d7SSteve Wise child_ep->com.local_addr.sin_addr.s_addr = 
local_ip; 1425cfdda9d7SSteve Wise child_ep->com.remote_addr.sin_family = PF_INET; 1426cfdda9d7SSteve Wise child_ep->com.remote_addr.sin_port = peer_port; 1427cfdda9d7SSteve Wise child_ep->com.remote_addr.sin_addr.s_addr = peer_ip; 1428cfdda9d7SSteve Wise c4iw_get_ep(&parent_ep->com); 1429cfdda9d7SSteve Wise child_ep->parent_ep = parent_ep; 1430cfdda9d7SSteve Wise child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid)); 1431cfdda9d7SSteve Wise child_ep->l2t = l2t; 1432cfdda9d7SSteve Wise child_ep->dst = dst; 1433cfdda9d7SSteve Wise child_ep->hwtid = hwtid; 1434cfdda9d7SSteve Wise child_ep->tx_chan = tx_chan; 1435cfdda9d7SSteve Wise child_ep->smac_idx = smac_idx; 1436cfdda9d7SSteve Wise child_ep->rss_qid = rss_qid; 1437cfdda9d7SSteve Wise child_ep->mtu = mtu; 1438cfdda9d7SSteve Wise child_ep->txq_idx = txq_idx; 1439d4f1a5c6SSteve Wise child_ep->ctrlq_idx = ctrlq_idx; 1440cfdda9d7SSteve Wise 1441cfdda9d7SSteve Wise PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__, 1442cfdda9d7SSteve Wise tx_chan, smac_idx, rss_qid); 1443cfdda9d7SSteve Wise 1444cfdda9d7SSteve Wise init_timer(&child_ep->timer); 1445cfdda9d7SSteve Wise cxgb4_insert_tid(t, child_ep, hwtid); 1446cfdda9d7SSteve Wise accept_cr(child_ep, peer_ip, skb, req); 1447cfdda9d7SSteve Wise goto out; 1448cfdda9d7SSteve Wise reject: 1449cfdda9d7SSteve Wise reject_cr(dev, hwtid, peer_ip, skb); 1450cfdda9d7SSteve Wise out: 1451cfdda9d7SSteve Wise return 0; 1452cfdda9d7SSteve Wise } 1453cfdda9d7SSteve Wise 1454cfdda9d7SSteve Wise static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb) 1455cfdda9d7SSteve Wise { 1456cfdda9d7SSteve Wise struct c4iw_ep *ep; 1457cfdda9d7SSteve Wise struct cpl_pass_establish *req = cplhdr(skb); 1458cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 1459cfdda9d7SSteve Wise unsigned int tid = GET_TID(req); 1460cfdda9d7SSteve Wise 1461cfdda9d7SSteve Wise ep = lookup_tid(t, tid); 1462cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1463cfdda9d7SSteve 
Wise ep->snd_seq = be32_to_cpu(req->snd_isn); 1464cfdda9d7SSteve Wise ep->rcv_seq = be32_to_cpu(req->rcv_isn); 1465cfdda9d7SSteve Wise 1466cfdda9d7SSteve Wise set_emss(ep, ntohs(req->tcp_opt)); 1467cfdda9d7SSteve Wise 1468cfdda9d7SSteve Wise dst_confirm(ep->dst); 1469cfdda9d7SSteve Wise state_set(&ep->com, MPA_REQ_WAIT); 1470cfdda9d7SSteve Wise start_ep_timer(ep); 1471cfdda9d7SSteve Wise send_flowc(ep, skb); 1472cfdda9d7SSteve Wise 1473cfdda9d7SSteve Wise return 0; 1474cfdda9d7SSteve Wise } 1475cfdda9d7SSteve Wise 1476cfdda9d7SSteve Wise static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) 1477cfdda9d7SSteve Wise { 1478cfdda9d7SSteve Wise struct cpl_peer_close *hdr = cplhdr(skb); 1479cfdda9d7SSteve Wise struct c4iw_ep *ep; 1480cfdda9d7SSteve Wise struct c4iw_qp_attributes attrs; 1481cfdda9d7SSteve Wise unsigned long flags; 1482cfdda9d7SSteve Wise int disconnect = 1; 1483cfdda9d7SSteve Wise int release = 0; 1484cfdda9d7SSteve Wise int closing = 0; 1485cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 1486cfdda9d7SSteve Wise unsigned int tid = GET_TID(hdr); 1487cfdda9d7SSteve Wise int start_timer = 0; 1488cfdda9d7SSteve Wise int stop_timer = 0; 1489cfdda9d7SSteve Wise 1490cfdda9d7SSteve Wise ep = lookup_tid(t, tid); 1491cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1492cfdda9d7SSteve Wise dst_confirm(ep->dst); 1493cfdda9d7SSteve Wise 1494cfdda9d7SSteve Wise spin_lock_irqsave(&ep->com.lock, flags); 1495cfdda9d7SSteve Wise switch (ep->com.state) { 1496cfdda9d7SSteve Wise case MPA_REQ_WAIT: 1497cfdda9d7SSteve Wise __state_set(&ep->com, CLOSING); 1498cfdda9d7SSteve Wise break; 1499cfdda9d7SSteve Wise case MPA_REQ_SENT: 1500cfdda9d7SSteve Wise __state_set(&ep->com, CLOSING); 1501cfdda9d7SSteve Wise connect_reply_upcall(ep, -ECONNRESET); 1502cfdda9d7SSteve Wise break; 1503cfdda9d7SSteve Wise case MPA_REQ_RCVD: 1504cfdda9d7SSteve Wise 1505cfdda9d7SSteve Wise /* 1506cfdda9d7SSteve Wise * We're gonna mark this puppy DEAD, but 
keep 1507cfdda9d7SSteve Wise * the reference on it until the ULP accepts or 1508cfdda9d7SSteve Wise * rejects the CR. Also wake up anyone waiting 1509cfdda9d7SSteve Wise * in rdma connection migration (see c4iw_accept_cr()). 1510cfdda9d7SSteve Wise */ 1511cfdda9d7SSteve Wise __state_set(&ep->com, CLOSING); 1512cfdda9d7SSteve Wise ep->com.rpl_done = 1; 1513cfdda9d7SSteve Wise ep->com.rpl_err = -ECONNRESET; 1514cfdda9d7SSteve Wise PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 1515cfdda9d7SSteve Wise wake_up(&ep->com.waitq); 1516cfdda9d7SSteve Wise break; 1517cfdda9d7SSteve Wise case MPA_REP_SENT: 1518cfdda9d7SSteve Wise __state_set(&ep->com, CLOSING); 1519cfdda9d7SSteve Wise ep->com.rpl_done = 1; 1520cfdda9d7SSteve Wise ep->com.rpl_err = -ECONNRESET; 1521cfdda9d7SSteve Wise PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 1522cfdda9d7SSteve Wise wake_up(&ep->com.waitq); 1523cfdda9d7SSteve Wise break; 1524cfdda9d7SSteve Wise case FPDU_MODE: 1525cfdda9d7SSteve Wise start_timer = 1; 1526cfdda9d7SSteve Wise __state_set(&ep->com, CLOSING); 1527cfdda9d7SSteve Wise closing = 1; 1528cfdda9d7SSteve Wise peer_close_upcall(ep); 1529cfdda9d7SSteve Wise break; 1530cfdda9d7SSteve Wise case ABORTING: 1531cfdda9d7SSteve Wise disconnect = 0; 1532cfdda9d7SSteve Wise break; 1533cfdda9d7SSteve Wise case CLOSING: 1534cfdda9d7SSteve Wise __state_set(&ep->com, MORIBUND); 1535cfdda9d7SSteve Wise disconnect = 0; 1536cfdda9d7SSteve Wise break; 1537cfdda9d7SSteve Wise case MORIBUND: 1538cfdda9d7SSteve Wise stop_timer = 1; 1539cfdda9d7SSteve Wise if (ep->com.cm_id && ep->com.qp) { 1540cfdda9d7SSteve Wise attrs.next_state = C4IW_QP_STATE_IDLE; 1541cfdda9d7SSteve Wise c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1542cfdda9d7SSteve Wise C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1543cfdda9d7SSteve Wise } 1544cfdda9d7SSteve Wise close_complete_upcall(ep); 1545cfdda9d7SSteve Wise __state_set(&ep->com, DEAD); 1546cfdda9d7SSteve Wise release = 1; 1547cfdda9d7SSteve Wise disconnect = 0; 1548cfdda9d7SSteve 
Wise break; 1549cfdda9d7SSteve Wise case DEAD: 1550cfdda9d7SSteve Wise disconnect = 0; 1551cfdda9d7SSteve Wise break; 1552cfdda9d7SSteve Wise default: 1553cfdda9d7SSteve Wise BUG_ON(1); 1554cfdda9d7SSteve Wise } 1555cfdda9d7SSteve Wise spin_unlock_irqrestore(&ep->com.lock, flags); 1556cfdda9d7SSteve Wise if (closing) { 1557cfdda9d7SSteve Wise attrs.next_state = C4IW_QP_STATE_CLOSING; 1558cfdda9d7SSteve Wise c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1559cfdda9d7SSteve Wise C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1560cfdda9d7SSteve Wise } 1561cfdda9d7SSteve Wise if (start_timer) 1562cfdda9d7SSteve Wise start_ep_timer(ep); 1563cfdda9d7SSteve Wise if (stop_timer) 1564cfdda9d7SSteve Wise stop_ep_timer(ep); 1565cfdda9d7SSteve Wise if (disconnect) 1566cfdda9d7SSteve Wise c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 1567cfdda9d7SSteve Wise if (release) 1568cfdda9d7SSteve Wise release_ep_resources(ep); 1569cfdda9d7SSteve Wise return 0; 1570cfdda9d7SSteve Wise } 1571cfdda9d7SSteve Wise 1572cfdda9d7SSteve Wise /* 1573cfdda9d7SSteve Wise * Returns whether an ABORT_REQ_RSS message is a negative advice. 
1574cfdda9d7SSteve Wise */ 1575cfdda9d7SSteve Wise static int is_neg_adv_abort(unsigned int status) 1576cfdda9d7SSteve Wise { 1577cfdda9d7SSteve Wise return status == CPL_ERR_RTX_NEG_ADVICE || 1578cfdda9d7SSteve Wise status == CPL_ERR_PERSIST_NEG_ADVICE; 1579cfdda9d7SSteve Wise } 1580cfdda9d7SSteve Wise 1581cfdda9d7SSteve Wise static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) 1582cfdda9d7SSteve Wise { 1583cfdda9d7SSteve Wise struct cpl_abort_req_rss *req = cplhdr(skb); 1584cfdda9d7SSteve Wise struct c4iw_ep *ep; 1585cfdda9d7SSteve Wise struct cpl_abort_rpl *rpl; 1586cfdda9d7SSteve Wise struct sk_buff *rpl_skb; 1587cfdda9d7SSteve Wise struct c4iw_qp_attributes attrs; 1588cfdda9d7SSteve Wise int ret; 1589cfdda9d7SSteve Wise int release = 0; 1590cfdda9d7SSteve Wise unsigned long flags; 1591cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 1592cfdda9d7SSteve Wise unsigned int tid = GET_TID(req); 1593cfdda9d7SSteve Wise int stop_timer = 0; 1594cfdda9d7SSteve Wise 1595cfdda9d7SSteve Wise ep = lookup_tid(t, tid); 1596cfdda9d7SSteve Wise if (is_neg_adv_abort(req->status)) { 1597cfdda9d7SSteve Wise PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, 1598cfdda9d7SSteve Wise ep->hwtid); 1599cfdda9d7SSteve Wise return 0; 1600cfdda9d7SSteve Wise } 1601cfdda9d7SSteve Wise spin_lock_irqsave(&ep->com.lock, flags); 1602cfdda9d7SSteve Wise PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, 1603cfdda9d7SSteve Wise ep->com.state); 1604cfdda9d7SSteve Wise switch (ep->com.state) { 1605cfdda9d7SSteve Wise case CONNECTING: 1606cfdda9d7SSteve Wise break; 1607cfdda9d7SSteve Wise case MPA_REQ_WAIT: 1608cfdda9d7SSteve Wise stop_timer = 1; 1609cfdda9d7SSteve Wise break; 1610cfdda9d7SSteve Wise case MPA_REQ_SENT: 1611cfdda9d7SSteve Wise stop_timer = 1; 1612cfdda9d7SSteve Wise connect_reply_upcall(ep, -ECONNRESET); 1613cfdda9d7SSteve Wise break; 1614cfdda9d7SSteve Wise case MPA_REP_SENT: 1615cfdda9d7SSteve Wise ep->com.rpl_done = 1; 1616cfdda9d7SSteve Wise 
ep->com.rpl_err = -ECONNRESET; 1617cfdda9d7SSteve Wise PDBG("waking up ep %p\n", ep); 1618cfdda9d7SSteve Wise wake_up(&ep->com.waitq); 1619cfdda9d7SSteve Wise break; 1620cfdda9d7SSteve Wise case MPA_REQ_RCVD: 1621cfdda9d7SSteve Wise 1622cfdda9d7SSteve Wise /* 1623cfdda9d7SSteve Wise * We're gonna mark this puppy DEAD, but keep 1624cfdda9d7SSteve Wise * the reference on it until the ULP accepts or 1625cfdda9d7SSteve Wise * rejects the CR. Also wake up anyone waiting 1626cfdda9d7SSteve Wise * in rdma connection migration (see c4iw_accept_cr()). 1627cfdda9d7SSteve Wise */ 1628cfdda9d7SSteve Wise ep->com.rpl_done = 1; 1629cfdda9d7SSteve Wise ep->com.rpl_err = -ECONNRESET; 1630cfdda9d7SSteve Wise PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 1631cfdda9d7SSteve Wise wake_up(&ep->com.waitq); 1632cfdda9d7SSteve Wise break; 1633cfdda9d7SSteve Wise case MORIBUND: 1634cfdda9d7SSteve Wise case CLOSING: 1635cfdda9d7SSteve Wise stop_timer = 1; 1636cfdda9d7SSteve Wise /*FALLTHROUGH*/ 1637cfdda9d7SSteve Wise case FPDU_MODE: 1638cfdda9d7SSteve Wise if (ep->com.cm_id && ep->com.qp) { 1639cfdda9d7SSteve Wise attrs.next_state = C4IW_QP_STATE_ERROR; 1640cfdda9d7SSteve Wise ret = c4iw_modify_qp(ep->com.qp->rhp, 1641cfdda9d7SSteve Wise ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 1642cfdda9d7SSteve Wise &attrs, 1); 1643cfdda9d7SSteve Wise if (ret) 1644cfdda9d7SSteve Wise printk(KERN_ERR MOD 1645cfdda9d7SSteve Wise "%s - qp <- error failed!\n", 1646cfdda9d7SSteve Wise __func__); 1647cfdda9d7SSteve Wise } 1648cfdda9d7SSteve Wise peer_abort_upcall(ep); 1649cfdda9d7SSteve Wise break; 1650cfdda9d7SSteve Wise case ABORTING: 1651cfdda9d7SSteve Wise break; 1652cfdda9d7SSteve Wise case DEAD: 1653cfdda9d7SSteve Wise PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); 1654cfdda9d7SSteve Wise spin_unlock_irqrestore(&ep->com.lock, flags); 1655cfdda9d7SSteve Wise return 0; 1656cfdda9d7SSteve Wise default: 1657cfdda9d7SSteve Wise BUG_ON(1); 1658cfdda9d7SSteve Wise break; 1659cfdda9d7SSteve Wise } 
1660cfdda9d7SSteve Wise dst_confirm(ep->dst); 1661cfdda9d7SSteve Wise if (ep->com.state != ABORTING) { 1662cfdda9d7SSteve Wise __state_set(&ep->com, DEAD); 1663cfdda9d7SSteve Wise release = 1; 1664cfdda9d7SSteve Wise } 1665cfdda9d7SSteve Wise spin_unlock_irqrestore(&ep->com.lock, flags); 1666cfdda9d7SSteve Wise 1667cfdda9d7SSteve Wise rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); 1668cfdda9d7SSteve Wise if (!rpl_skb) { 1669cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - cannot allocate skb!\n", 1670cfdda9d7SSteve Wise __func__); 1671cfdda9d7SSteve Wise release = 1; 1672cfdda9d7SSteve Wise goto out; 1673cfdda9d7SSteve Wise } 1674cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 1675cfdda9d7SSteve Wise rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); 1676cfdda9d7SSteve Wise INIT_TP_WR(rpl, ep->hwtid); 1677cfdda9d7SSteve Wise OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); 1678cfdda9d7SSteve Wise rpl->cmd = CPL_ABORT_NO_RST; 1679cfdda9d7SSteve Wise c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); 1680cfdda9d7SSteve Wise out: 1681cfdda9d7SSteve Wise if (stop_timer) 1682cfdda9d7SSteve Wise stop_ep_timer(ep); 1683cfdda9d7SSteve Wise if (release) 1684cfdda9d7SSteve Wise release_ep_resources(ep); 1685cfdda9d7SSteve Wise return 0; 1686cfdda9d7SSteve Wise } 1687cfdda9d7SSteve Wise 1688cfdda9d7SSteve Wise static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1689cfdda9d7SSteve Wise { 1690cfdda9d7SSteve Wise struct c4iw_ep *ep; 1691cfdda9d7SSteve Wise struct c4iw_qp_attributes attrs; 1692cfdda9d7SSteve Wise struct cpl_close_con_rpl *rpl = cplhdr(skb); 1693cfdda9d7SSteve Wise unsigned long flags; 1694cfdda9d7SSteve Wise int release = 0; 1695cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 1696cfdda9d7SSteve Wise unsigned int tid = GET_TID(rpl); 1697cfdda9d7SSteve Wise int stop_timer = 0; 1698cfdda9d7SSteve Wise 1699cfdda9d7SSteve Wise ep = lookup_tid(t, tid); 1700cfdda9d7SSteve Wise 
1701cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1702cfdda9d7SSteve Wise BUG_ON(!ep); 1703cfdda9d7SSteve Wise 1704cfdda9d7SSteve Wise /* The cm_id may be null if we failed to connect */ 1705cfdda9d7SSteve Wise spin_lock_irqsave(&ep->com.lock, flags); 1706cfdda9d7SSteve Wise switch (ep->com.state) { 1707cfdda9d7SSteve Wise case CLOSING: 1708cfdda9d7SSteve Wise __state_set(&ep->com, MORIBUND); 1709cfdda9d7SSteve Wise break; 1710cfdda9d7SSteve Wise case MORIBUND: 1711cfdda9d7SSteve Wise stop_timer = 1; 1712cfdda9d7SSteve Wise if ((ep->com.cm_id) && (ep->com.qp)) { 1713cfdda9d7SSteve Wise attrs.next_state = C4IW_QP_STATE_IDLE; 1714cfdda9d7SSteve Wise c4iw_modify_qp(ep->com.qp->rhp, 1715cfdda9d7SSteve Wise ep->com.qp, 1716cfdda9d7SSteve Wise C4IW_QP_ATTR_NEXT_STATE, 1717cfdda9d7SSteve Wise &attrs, 1); 1718cfdda9d7SSteve Wise } 1719cfdda9d7SSteve Wise close_complete_upcall(ep); 1720cfdda9d7SSteve Wise __state_set(&ep->com, DEAD); 1721cfdda9d7SSteve Wise release = 1; 1722cfdda9d7SSteve Wise break; 1723cfdda9d7SSteve Wise case ABORTING: 1724cfdda9d7SSteve Wise case DEAD: 1725cfdda9d7SSteve Wise break; 1726cfdda9d7SSteve Wise default: 1727cfdda9d7SSteve Wise BUG_ON(1); 1728cfdda9d7SSteve Wise break; 1729cfdda9d7SSteve Wise } 1730cfdda9d7SSteve Wise spin_unlock_irqrestore(&ep->com.lock, flags); 1731cfdda9d7SSteve Wise if (stop_timer) 1732cfdda9d7SSteve Wise stop_ep_timer(ep); 1733cfdda9d7SSteve Wise if (release) 1734cfdda9d7SSteve Wise release_ep_resources(ep); 1735cfdda9d7SSteve Wise return 0; 1736cfdda9d7SSteve Wise } 1737cfdda9d7SSteve Wise 1738cfdda9d7SSteve Wise static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) 1739cfdda9d7SSteve Wise { 1740cfdda9d7SSteve Wise struct c4iw_ep *ep; 1741cfdda9d7SSteve Wise struct cpl_rdma_terminate *term = cplhdr(skb); 1742cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 1743cfdda9d7SSteve Wise unsigned int tid = GET_TID(term); 1744cfdda9d7SSteve Wise 1745cfdda9d7SSteve Wise ep = 
lookup_tid(t, tid); 1746cfdda9d7SSteve Wise 1747cfdda9d7SSteve Wise if (state_read(&ep->com) != FPDU_MODE) 1748cfdda9d7SSteve Wise return 0; 1749cfdda9d7SSteve Wise 1750cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1751cfdda9d7SSteve Wise skb_pull(skb, sizeof *term); 1752cfdda9d7SSteve Wise PDBG("%s saving %d bytes of term msg\n", __func__, skb->len); 1753cfdda9d7SSteve Wise skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer, 1754cfdda9d7SSteve Wise skb->len); 1755cfdda9d7SSteve Wise ep->com.qp->attr.terminate_msg_len = skb->len; 1756cfdda9d7SSteve Wise ep->com.qp->attr.is_terminate_local = 0; 1757cfdda9d7SSteve Wise return 0; 1758cfdda9d7SSteve Wise } 1759cfdda9d7SSteve Wise 1760cfdda9d7SSteve Wise /* 1761cfdda9d7SSteve Wise * Upcall from the adapter indicating data has been transmitted. 1762cfdda9d7SSteve Wise * For us its just the single MPA request or reply. We can now free 1763cfdda9d7SSteve Wise * the skb holding the mpa message. 1764cfdda9d7SSteve Wise */ 1765cfdda9d7SSteve Wise static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) 1766cfdda9d7SSteve Wise { 1767cfdda9d7SSteve Wise struct c4iw_ep *ep; 1768cfdda9d7SSteve Wise struct cpl_fw4_ack *hdr = cplhdr(skb); 1769cfdda9d7SSteve Wise u8 credits = hdr->credits; 1770cfdda9d7SSteve Wise unsigned int tid = GET_TID(hdr); 1771cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 1772cfdda9d7SSteve Wise 1773cfdda9d7SSteve Wise 1774cfdda9d7SSteve Wise ep = lookup_tid(t, tid); 1775cfdda9d7SSteve Wise PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); 1776cfdda9d7SSteve Wise if (credits == 0) { 1777cfdda9d7SSteve Wise PDBG(KERN_ERR "%s 0 credit ack ep %p tid %u state %u\n", 1778cfdda9d7SSteve Wise __func__, ep, ep->hwtid, state_read(&ep->com)); 1779cfdda9d7SSteve Wise return 0; 1780cfdda9d7SSteve Wise } 1781cfdda9d7SSteve Wise 1782cfdda9d7SSteve Wise dst_confirm(ep->dst); 1783cfdda9d7SSteve Wise if (ep->mpa_skb) { 1784cfdda9d7SSteve Wise 
PDBG("%s last streaming msg ack ep %p tid %u state %u " 1785cfdda9d7SSteve Wise "initiator %u freeing skb\n", __func__, ep, ep->hwtid, 1786cfdda9d7SSteve Wise state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0); 1787cfdda9d7SSteve Wise kfree_skb(ep->mpa_skb); 1788cfdda9d7SSteve Wise ep->mpa_skb = NULL; 1789cfdda9d7SSteve Wise } 1790cfdda9d7SSteve Wise return 0; 1791cfdda9d7SSteve Wise } 1792cfdda9d7SSteve Wise 1793cfdda9d7SSteve Wise int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 1794cfdda9d7SSteve Wise { 1795cfdda9d7SSteve Wise int err; 1796cfdda9d7SSteve Wise struct c4iw_ep *ep = to_ep(cm_id); 1797cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1798cfdda9d7SSteve Wise 1799cfdda9d7SSteve Wise if (state_read(&ep->com) == DEAD) { 1800cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 1801cfdda9d7SSteve Wise return -ECONNRESET; 1802cfdda9d7SSteve Wise } 1803cfdda9d7SSteve Wise BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 1804cfdda9d7SSteve Wise if (mpa_rev == 0) 1805cfdda9d7SSteve Wise abort_connection(ep, NULL, GFP_KERNEL); 1806cfdda9d7SSteve Wise else { 1807cfdda9d7SSteve Wise err = send_mpa_reject(ep, pdata, pdata_len); 1808cfdda9d7SSteve Wise err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 1809cfdda9d7SSteve Wise } 1810cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 1811cfdda9d7SSteve Wise return 0; 1812cfdda9d7SSteve Wise } 1813cfdda9d7SSteve Wise 1814cfdda9d7SSteve Wise int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 1815cfdda9d7SSteve Wise { 1816cfdda9d7SSteve Wise int err; 1817cfdda9d7SSteve Wise struct c4iw_qp_attributes attrs; 1818cfdda9d7SSteve Wise enum c4iw_qp_attr_mask mask; 1819cfdda9d7SSteve Wise struct c4iw_ep *ep = to_ep(cm_id); 1820cfdda9d7SSteve Wise struct c4iw_dev *h = to_c4iw_dev(cm_id->device); 1821cfdda9d7SSteve Wise struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); 1822cfdda9d7SSteve Wise 1823cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 
1824cfdda9d7SSteve Wise if (state_read(&ep->com) == DEAD) { 1825cfdda9d7SSteve Wise err = -ECONNRESET; 1826cfdda9d7SSteve Wise goto err; 1827cfdda9d7SSteve Wise } 1828cfdda9d7SSteve Wise 1829cfdda9d7SSteve Wise BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 1830cfdda9d7SSteve Wise BUG_ON(!qp); 1831cfdda9d7SSteve Wise 1832be4c9badSRoland Dreier if ((conn_param->ord > c4iw_max_read_depth) || 1833be4c9badSRoland Dreier (conn_param->ird > c4iw_max_read_depth)) { 1834cfdda9d7SSteve Wise abort_connection(ep, NULL, GFP_KERNEL); 1835cfdda9d7SSteve Wise err = -EINVAL; 1836cfdda9d7SSteve Wise goto err; 1837cfdda9d7SSteve Wise } 1838cfdda9d7SSteve Wise 1839cfdda9d7SSteve Wise cm_id->add_ref(cm_id); 1840cfdda9d7SSteve Wise ep->com.cm_id = cm_id; 1841cfdda9d7SSteve Wise ep->com.qp = qp; 1842cfdda9d7SSteve Wise 1843cfdda9d7SSteve Wise ep->ird = conn_param->ird; 1844cfdda9d7SSteve Wise ep->ord = conn_param->ord; 1845cfdda9d7SSteve Wise 1846cfdda9d7SSteve Wise if (peer2peer && ep->ird == 0) 1847cfdda9d7SSteve Wise ep->ird = 1; 1848cfdda9d7SSteve Wise 1849cfdda9d7SSteve Wise PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); 1850cfdda9d7SSteve Wise 1851cfdda9d7SSteve Wise /* bind QP to EP and move to RTS */ 1852cfdda9d7SSteve Wise attrs.mpa_attr = ep->mpa_attr; 1853cfdda9d7SSteve Wise attrs.max_ird = ep->ird; 1854cfdda9d7SSteve Wise attrs.max_ord = ep->ord; 1855cfdda9d7SSteve Wise attrs.llp_stream_handle = ep; 1856cfdda9d7SSteve Wise attrs.next_state = C4IW_QP_STATE_RTS; 1857cfdda9d7SSteve Wise 1858cfdda9d7SSteve Wise /* bind QP and TID with INIT_WR */ 1859cfdda9d7SSteve Wise mask = C4IW_QP_ATTR_NEXT_STATE | 1860cfdda9d7SSteve Wise C4IW_QP_ATTR_LLP_STREAM_HANDLE | 1861cfdda9d7SSteve Wise C4IW_QP_ATTR_MPA_ATTR | 1862cfdda9d7SSteve Wise C4IW_QP_ATTR_MAX_IRD | 1863cfdda9d7SSteve Wise C4IW_QP_ATTR_MAX_ORD; 1864cfdda9d7SSteve Wise 1865cfdda9d7SSteve Wise err = c4iw_modify_qp(ep->com.qp->rhp, 1866cfdda9d7SSteve Wise ep->com.qp, mask, &attrs, 1); 1867cfdda9d7SSteve Wise 
if (err) 1868cfdda9d7SSteve Wise goto err1; 1869cfdda9d7SSteve Wise err = send_mpa_reply(ep, conn_param->private_data, 1870cfdda9d7SSteve Wise conn_param->private_data_len); 1871cfdda9d7SSteve Wise if (err) 1872cfdda9d7SSteve Wise goto err1; 1873cfdda9d7SSteve Wise 1874cfdda9d7SSteve Wise state_set(&ep->com, FPDU_MODE); 1875cfdda9d7SSteve Wise established_upcall(ep); 1876cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 1877cfdda9d7SSteve Wise return 0; 1878cfdda9d7SSteve Wise err1: 1879cfdda9d7SSteve Wise ep->com.cm_id = NULL; 1880cfdda9d7SSteve Wise ep->com.qp = NULL; 1881cfdda9d7SSteve Wise cm_id->rem_ref(cm_id); 1882cfdda9d7SSteve Wise err: 1883cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 1884cfdda9d7SSteve Wise return err; 1885cfdda9d7SSteve Wise } 1886cfdda9d7SSteve Wise 1887cfdda9d7SSteve Wise int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 1888cfdda9d7SSteve Wise { 1889cfdda9d7SSteve Wise int err = 0; 1890cfdda9d7SSteve Wise struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 1891cfdda9d7SSteve Wise struct c4iw_ep *ep; 1892cfdda9d7SSteve Wise struct rtable *rt; 1893cfdda9d7SSteve Wise struct net_device *pdev; 1894cfdda9d7SSteve Wise int step; 1895cfdda9d7SSteve Wise 1896be4c9badSRoland Dreier if ((conn_param->ord > c4iw_max_read_depth) || 1897be4c9badSRoland Dreier (conn_param->ird > c4iw_max_read_depth)) { 1898be4c9badSRoland Dreier err = -EINVAL; 1899be4c9badSRoland Dreier goto out; 1900be4c9badSRoland Dreier } 1901cfdda9d7SSteve Wise ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 1902cfdda9d7SSteve Wise if (!ep) { 1903cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 1904cfdda9d7SSteve Wise err = -ENOMEM; 1905cfdda9d7SSteve Wise goto out; 1906cfdda9d7SSteve Wise } 1907cfdda9d7SSteve Wise init_timer(&ep->timer); 1908cfdda9d7SSteve Wise ep->plen = conn_param->private_data_len; 1909cfdda9d7SSteve Wise if (ep->plen) 1910cfdda9d7SSteve Wise memcpy(ep->mpa_pkt + sizeof(struct mpa_message), 1911cfdda9d7SSteve Wise 
conn_param->private_data, ep->plen); 1912cfdda9d7SSteve Wise ep->ird = conn_param->ird; 1913cfdda9d7SSteve Wise ep->ord = conn_param->ord; 1914cfdda9d7SSteve Wise 1915cfdda9d7SSteve Wise if (peer2peer && ep->ord == 0) 1916cfdda9d7SSteve Wise ep->ord = 1; 1917cfdda9d7SSteve Wise 1918cfdda9d7SSteve Wise cm_id->add_ref(cm_id); 1919cfdda9d7SSteve Wise ep->com.dev = dev; 1920cfdda9d7SSteve Wise ep->com.cm_id = cm_id; 1921cfdda9d7SSteve Wise ep->com.qp = get_qhp(dev, conn_param->qpn); 1922cfdda9d7SSteve Wise BUG_ON(!ep->com.qp); 1923cfdda9d7SSteve Wise PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, 1924cfdda9d7SSteve Wise ep->com.qp, cm_id); 1925cfdda9d7SSteve Wise 1926cfdda9d7SSteve Wise /* 1927cfdda9d7SSteve Wise * Allocate an active TID to initiate a TCP connection. 1928cfdda9d7SSteve Wise */ 1929cfdda9d7SSteve Wise ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); 1930cfdda9d7SSteve Wise if (ep->atid == -1) { 1931cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); 1932cfdda9d7SSteve Wise err = -ENOMEM; 1933cfdda9d7SSteve Wise goto fail2; 1934cfdda9d7SSteve Wise } 1935cfdda9d7SSteve Wise 1936cfdda9d7SSteve Wise PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__, 1937cfdda9d7SSteve Wise ntohl(cm_id->local_addr.sin_addr.s_addr), 1938cfdda9d7SSteve Wise ntohs(cm_id->local_addr.sin_port), 1939cfdda9d7SSteve Wise ntohl(cm_id->remote_addr.sin_addr.s_addr), 1940cfdda9d7SSteve Wise ntohs(cm_id->remote_addr.sin_port)); 1941cfdda9d7SSteve Wise 1942cfdda9d7SSteve Wise /* find a route */ 1943cfdda9d7SSteve Wise rt = find_route(dev, 1944cfdda9d7SSteve Wise cm_id->local_addr.sin_addr.s_addr, 1945cfdda9d7SSteve Wise cm_id->remote_addr.sin_addr.s_addr, 1946cfdda9d7SSteve Wise cm_id->local_addr.sin_port, 1947cfdda9d7SSteve Wise cm_id->remote_addr.sin_port, 0); 1948cfdda9d7SSteve Wise if (!rt) { 1949cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); 1950cfdda9d7SSteve Wise err = -EHOSTUNREACH; 
1951cfdda9d7SSteve Wise goto fail3; 1952cfdda9d7SSteve Wise } 1953cfdda9d7SSteve Wise ep->dst = &rt->u.dst; 1954cfdda9d7SSteve Wise 1955cfdda9d7SSteve Wise /* get a l2t entry */ 1956cfdda9d7SSteve Wise if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) { 1957cfdda9d7SSteve Wise PDBG("%s LOOPBACK\n", __func__); 1958cfdda9d7SSteve Wise pdev = ip_dev_find(&init_net, 1959cfdda9d7SSteve Wise cm_id->remote_addr.sin_addr.s_addr); 1960cfdda9d7SSteve Wise ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, 1961cfdda9d7SSteve Wise ep->dst->neighbour, 1962cfdda9d7SSteve Wise pdev, 0); 1963cfdda9d7SSteve Wise ep->mtu = pdev->mtu; 1964cfdda9d7SSteve Wise ep->tx_chan = cxgb4_port_chan(pdev); 19652c5934bfSSteve Wise ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; 1966cfdda9d7SSteve Wise step = ep->com.dev->rdev.lldi.ntxq / 1967cfdda9d7SSteve Wise ep->com.dev->rdev.lldi.nchan; 1968cfdda9d7SSteve Wise ep->txq_idx = cxgb4_port_idx(pdev) * step; 1969cfdda9d7SSteve Wise step = ep->com.dev->rdev.lldi.nrxq / 1970cfdda9d7SSteve Wise ep->com.dev->rdev.lldi.nchan; 1971d4f1a5c6SSteve Wise ep->ctrlq_idx = cxgb4_port_idx(pdev); 1972cfdda9d7SSteve Wise ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ 1973cfdda9d7SSteve Wise cxgb4_port_idx(pdev) * step]; 1974cfdda9d7SSteve Wise dev_put(pdev); 1975cfdda9d7SSteve Wise } else { 1976cfdda9d7SSteve Wise ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, 1977cfdda9d7SSteve Wise ep->dst->neighbour, 1978cfdda9d7SSteve Wise ep->dst->neighbour->dev, 0); 1979cfdda9d7SSteve Wise ep->mtu = dst_mtu(ep->dst); 1980cfdda9d7SSteve Wise ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev); 19812c5934bfSSteve Wise ep->smac_idx = (cxgb4_port_viid(ep->dst->neighbour->dev) & 19822c5934bfSSteve Wise 0x7F) << 1; 1983cfdda9d7SSteve Wise step = ep->com.dev->rdev.lldi.ntxq / 1984cfdda9d7SSteve Wise ep->com.dev->rdev.lldi.nchan; 1985cfdda9d7SSteve Wise ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step; 1986d4f1a5c6SSteve Wise ep->ctrlq_idx = 
cxgb4_port_idx(ep->dst->neighbour->dev); 1987cfdda9d7SSteve Wise step = ep->com.dev->rdev.lldi.nrxq / 1988cfdda9d7SSteve Wise ep->com.dev->rdev.lldi.nchan; 1989cfdda9d7SSteve Wise ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ 1990cfdda9d7SSteve Wise cxgb4_port_idx(ep->dst->neighbour->dev) * step]; 1991cfdda9d7SSteve Wise } 1992cfdda9d7SSteve Wise if (!ep->l2t) { 1993cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 1994cfdda9d7SSteve Wise err = -ENOMEM; 1995cfdda9d7SSteve Wise goto fail4; 1996cfdda9d7SSteve Wise } 1997cfdda9d7SSteve Wise 1998cfdda9d7SSteve Wise PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 1999cfdda9d7SSteve Wise __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 2000cfdda9d7SSteve Wise ep->l2t->idx); 2001cfdda9d7SSteve Wise 2002cfdda9d7SSteve Wise state_set(&ep->com, CONNECTING); 2003cfdda9d7SSteve Wise ep->tos = 0; 2004cfdda9d7SSteve Wise ep->com.local_addr = cm_id->local_addr; 2005cfdda9d7SSteve Wise ep->com.remote_addr = cm_id->remote_addr; 2006cfdda9d7SSteve Wise 2007cfdda9d7SSteve Wise /* send connect request to rnic */ 2008cfdda9d7SSteve Wise err = send_connect(ep); 2009cfdda9d7SSteve Wise if (!err) 2010cfdda9d7SSteve Wise goto out; 2011cfdda9d7SSteve Wise 2012cfdda9d7SSteve Wise cxgb4_l2t_release(ep->l2t); 2013cfdda9d7SSteve Wise fail4: 2014cfdda9d7SSteve Wise dst_release(ep->dst); 2015cfdda9d7SSteve Wise fail3: 2016cfdda9d7SSteve Wise cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 2017cfdda9d7SSteve Wise fail2: 2018cfdda9d7SSteve Wise cm_id->rem_ref(cm_id); 2019cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 2020cfdda9d7SSteve Wise out: 2021cfdda9d7SSteve Wise return err; 2022cfdda9d7SSteve Wise } 2023cfdda9d7SSteve Wise 2024cfdda9d7SSteve Wise int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) 2025cfdda9d7SSteve Wise { 2026cfdda9d7SSteve Wise int err = 0; 2027cfdda9d7SSteve Wise struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 2028cfdda9d7SSteve Wise struct 
c4iw_listen_ep *ep; 2029cfdda9d7SSteve Wise 2030cfdda9d7SSteve Wise 2031cfdda9d7SSteve Wise might_sleep(); 2032cfdda9d7SSteve Wise 2033cfdda9d7SSteve Wise ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 2034cfdda9d7SSteve Wise if (!ep) { 2035cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 2036cfdda9d7SSteve Wise err = -ENOMEM; 2037cfdda9d7SSteve Wise goto fail1; 2038cfdda9d7SSteve Wise } 2039cfdda9d7SSteve Wise PDBG("%s ep %p\n", __func__, ep); 2040cfdda9d7SSteve Wise cm_id->add_ref(cm_id); 2041cfdda9d7SSteve Wise ep->com.cm_id = cm_id; 2042cfdda9d7SSteve Wise ep->com.dev = dev; 2043cfdda9d7SSteve Wise ep->backlog = backlog; 2044cfdda9d7SSteve Wise ep->com.local_addr = cm_id->local_addr; 2045cfdda9d7SSteve Wise 2046cfdda9d7SSteve Wise /* 2047cfdda9d7SSteve Wise * Allocate a server TID. 2048cfdda9d7SSteve Wise */ 2049cfdda9d7SSteve Wise ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep); 2050cfdda9d7SSteve Wise if (ep->stid == -1) { 2051be4c9badSRoland Dreier printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__); 2052cfdda9d7SSteve Wise err = -ENOMEM; 2053cfdda9d7SSteve Wise goto fail2; 2054cfdda9d7SSteve Wise } 2055cfdda9d7SSteve Wise 2056cfdda9d7SSteve Wise state_set(&ep->com, LISTEN); 2057cfdda9d7SSteve Wise err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid, 2058cfdda9d7SSteve Wise ep->com.local_addr.sin_addr.s_addr, 2059cfdda9d7SSteve Wise ep->com.local_addr.sin_port, 2060cfdda9d7SSteve Wise ep->com.dev->rdev.lldi.rxq_ids[0]); 2061cfdda9d7SSteve Wise if (err) 2062cfdda9d7SSteve Wise goto fail3; 2063cfdda9d7SSteve Wise 2064cfdda9d7SSteve Wise /* wait for pass_open_rpl */ 2065cfdda9d7SSteve Wise wait_event(ep->com.waitq, ep->com.rpl_done); 2066cfdda9d7SSteve Wise err = ep->com.rpl_err; 2067cfdda9d7SSteve Wise if (!err) { 2068cfdda9d7SSteve Wise cm_id->provider_data = ep; 2069cfdda9d7SSteve Wise goto out; 2070cfdda9d7SSteve Wise } 2071cfdda9d7SSteve Wise fail3: 2072cfdda9d7SSteve Wise 
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); 2073cfdda9d7SSteve Wise fail2: 2074cfdda9d7SSteve Wise cm_id->rem_ref(cm_id); 2075cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 2076cfdda9d7SSteve Wise fail1: 2077cfdda9d7SSteve Wise out: 2078cfdda9d7SSteve Wise return err; 2079cfdda9d7SSteve Wise } 2080cfdda9d7SSteve Wise 2081cfdda9d7SSteve Wise int c4iw_destroy_listen(struct iw_cm_id *cm_id) 2082cfdda9d7SSteve Wise { 2083cfdda9d7SSteve Wise int err; 2084cfdda9d7SSteve Wise struct c4iw_listen_ep *ep = to_listen_ep(cm_id); 2085cfdda9d7SSteve Wise 2086cfdda9d7SSteve Wise PDBG("%s ep %p\n", __func__, ep); 2087cfdda9d7SSteve Wise 2088cfdda9d7SSteve Wise might_sleep(); 2089cfdda9d7SSteve Wise state_set(&ep->com, DEAD); 2090cfdda9d7SSteve Wise ep->com.rpl_done = 0; 2091cfdda9d7SSteve Wise ep->com.rpl_err = 0; 2092cfdda9d7SSteve Wise err = listen_stop(ep); 2093cfdda9d7SSteve Wise if (err) 2094cfdda9d7SSteve Wise goto done; 2095cfdda9d7SSteve Wise wait_event(ep->com.waitq, ep->com.rpl_done); 2096cfdda9d7SSteve Wise cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); 2097cfdda9d7SSteve Wise done: 2098cfdda9d7SSteve Wise err = ep->com.rpl_err; 2099cfdda9d7SSteve Wise cm_id->rem_ref(cm_id); 2100cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 2101cfdda9d7SSteve Wise return err; 2102cfdda9d7SSteve Wise } 2103cfdda9d7SSteve Wise 2104cfdda9d7SSteve Wise int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) 2105cfdda9d7SSteve Wise { 2106cfdda9d7SSteve Wise int ret = 0; 2107cfdda9d7SSteve Wise unsigned long flags; 2108cfdda9d7SSteve Wise int close = 0; 2109cfdda9d7SSteve Wise int fatal = 0; 2110cfdda9d7SSteve Wise struct c4iw_rdev *rdev; 2111cfdda9d7SSteve Wise int start_timer = 0; 2112cfdda9d7SSteve Wise int stop_timer = 0; 2113cfdda9d7SSteve Wise 2114cfdda9d7SSteve Wise spin_lock_irqsave(&ep->com.lock, flags); 2115cfdda9d7SSteve Wise 2116cfdda9d7SSteve Wise PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, 2117cfdda9d7SSteve Wise 
states[ep->com.state], abrupt); 2118cfdda9d7SSteve Wise 2119cfdda9d7SSteve Wise rdev = &ep->com.dev->rdev; 2120cfdda9d7SSteve Wise if (c4iw_fatal_error(rdev)) { 2121cfdda9d7SSteve Wise fatal = 1; 2122cfdda9d7SSteve Wise close_complete_upcall(ep); 2123cfdda9d7SSteve Wise ep->com.state = DEAD; 2124cfdda9d7SSteve Wise } 2125cfdda9d7SSteve Wise switch (ep->com.state) { 2126cfdda9d7SSteve Wise case MPA_REQ_WAIT: 2127cfdda9d7SSteve Wise case MPA_REQ_SENT: 2128cfdda9d7SSteve Wise case MPA_REQ_RCVD: 2129cfdda9d7SSteve Wise case MPA_REP_SENT: 2130cfdda9d7SSteve Wise case FPDU_MODE: 2131cfdda9d7SSteve Wise close = 1; 2132cfdda9d7SSteve Wise if (abrupt) 2133cfdda9d7SSteve Wise ep->com.state = ABORTING; 2134cfdda9d7SSteve Wise else { 2135cfdda9d7SSteve Wise ep->com.state = CLOSING; 2136cfdda9d7SSteve Wise start_timer = 1; 2137cfdda9d7SSteve Wise } 2138cfdda9d7SSteve Wise set_bit(CLOSE_SENT, &ep->com.flags); 2139cfdda9d7SSteve Wise break; 2140cfdda9d7SSteve Wise case CLOSING: 2141cfdda9d7SSteve Wise if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { 2142cfdda9d7SSteve Wise close = 1; 2143cfdda9d7SSteve Wise if (abrupt) { 2144cfdda9d7SSteve Wise stop_timer = 1; 2145cfdda9d7SSteve Wise ep->com.state = ABORTING; 2146cfdda9d7SSteve Wise } else 2147cfdda9d7SSteve Wise ep->com.state = MORIBUND; 2148cfdda9d7SSteve Wise } 2149cfdda9d7SSteve Wise break; 2150cfdda9d7SSteve Wise case MORIBUND: 2151cfdda9d7SSteve Wise case ABORTING: 2152cfdda9d7SSteve Wise case DEAD: 2153cfdda9d7SSteve Wise PDBG("%s ignoring disconnect ep %p state %u\n", 2154cfdda9d7SSteve Wise __func__, ep, ep->com.state); 2155cfdda9d7SSteve Wise break; 2156cfdda9d7SSteve Wise default: 2157cfdda9d7SSteve Wise BUG(); 2158cfdda9d7SSteve Wise break; 2159cfdda9d7SSteve Wise } 2160cfdda9d7SSteve Wise 2161cfdda9d7SSteve Wise spin_unlock_irqrestore(&ep->com.lock, flags); 2162cfdda9d7SSteve Wise if (start_timer) 2163cfdda9d7SSteve Wise start_ep_timer(ep); 2164cfdda9d7SSteve Wise if (stop_timer) 2165cfdda9d7SSteve Wise 
stop_ep_timer(ep); 2166cfdda9d7SSteve Wise if (close) { 2167cfdda9d7SSteve Wise if (abrupt) 2168cfdda9d7SSteve Wise ret = abort_connection(ep, NULL, gfp); 2169cfdda9d7SSteve Wise else 2170cfdda9d7SSteve Wise ret = send_halfclose(ep, gfp); 2171cfdda9d7SSteve Wise if (ret) 2172cfdda9d7SSteve Wise fatal = 1; 2173cfdda9d7SSteve Wise } 2174cfdda9d7SSteve Wise if (fatal) 2175cfdda9d7SSteve Wise release_ep_resources(ep); 2176cfdda9d7SSteve Wise return ret; 2177cfdda9d7SSteve Wise } 2178cfdda9d7SSteve Wise 2179cfdda9d7SSteve Wise /* 2180be4c9badSRoland Dreier * These are the real handlers that are called from a 2181be4c9badSRoland Dreier * work queue. 2182be4c9badSRoland Dreier */ 2183be4c9badSRoland Dreier static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = { 2184be4c9badSRoland Dreier [CPL_ACT_ESTABLISH] = act_establish, 2185be4c9badSRoland Dreier [CPL_ACT_OPEN_RPL] = act_open_rpl, 2186be4c9badSRoland Dreier [CPL_RX_DATA] = rx_data, 2187be4c9badSRoland Dreier [CPL_ABORT_RPL_RSS] = abort_rpl, 2188be4c9badSRoland Dreier [CPL_ABORT_RPL] = abort_rpl, 2189be4c9badSRoland Dreier [CPL_PASS_OPEN_RPL] = pass_open_rpl, 2190be4c9badSRoland Dreier [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl, 2191be4c9badSRoland Dreier [CPL_PASS_ACCEPT_REQ] = pass_accept_req, 2192be4c9badSRoland Dreier [CPL_PASS_ESTABLISH] = pass_establish, 2193be4c9badSRoland Dreier [CPL_PEER_CLOSE] = peer_close, 2194be4c9badSRoland Dreier [CPL_ABORT_REQ_RSS] = peer_abort, 2195be4c9badSRoland Dreier [CPL_CLOSE_CON_RPL] = close_con_rpl, 2196be4c9badSRoland Dreier [CPL_RDMA_TERMINATE] = terminate, 2197be4c9badSRoland Dreier [CPL_FW4_ACK] = fw4_ack 2198be4c9badSRoland Dreier }; 2199be4c9badSRoland Dreier 2200be4c9badSRoland Dreier static void process_timeout(struct c4iw_ep *ep) 2201be4c9badSRoland Dreier { 2202be4c9badSRoland Dreier struct c4iw_qp_attributes attrs; 2203be4c9badSRoland Dreier int abort = 1; 2204be4c9badSRoland Dreier 2205be4c9badSRoland Dreier spin_lock_irq(&ep->com.lock); 2206be4c9badSRoland Dreier 
PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, 2207be4c9badSRoland Dreier ep->com.state); 2208be4c9badSRoland Dreier switch (ep->com.state) { 2209be4c9badSRoland Dreier case MPA_REQ_SENT: 2210be4c9badSRoland Dreier __state_set(&ep->com, ABORTING); 2211be4c9badSRoland Dreier connect_reply_upcall(ep, -ETIMEDOUT); 2212be4c9badSRoland Dreier break; 2213be4c9badSRoland Dreier case MPA_REQ_WAIT: 2214be4c9badSRoland Dreier __state_set(&ep->com, ABORTING); 2215be4c9badSRoland Dreier break; 2216be4c9badSRoland Dreier case CLOSING: 2217be4c9badSRoland Dreier case MORIBUND: 2218be4c9badSRoland Dreier if (ep->com.cm_id && ep->com.qp) { 2219be4c9badSRoland Dreier attrs.next_state = C4IW_QP_STATE_ERROR; 2220be4c9badSRoland Dreier c4iw_modify_qp(ep->com.qp->rhp, 2221be4c9badSRoland Dreier ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 2222be4c9badSRoland Dreier &attrs, 1); 2223be4c9badSRoland Dreier } 2224be4c9badSRoland Dreier __state_set(&ep->com, ABORTING); 2225be4c9badSRoland Dreier break; 2226be4c9badSRoland Dreier default: 2227be4c9badSRoland Dreier printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n", 2228be4c9badSRoland Dreier __func__, ep, ep->hwtid, ep->com.state); 2229be4c9badSRoland Dreier WARN_ON(1); 2230be4c9badSRoland Dreier abort = 0; 2231be4c9badSRoland Dreier } 2232be4c9badSRoland Dreier spin_unlock_irq(&ep->com.lock); 2233be4c9badSRoland Dreier if (abort) 2234be4c9badSRoland Dreier abort_connection(ep, NULL, GFP_KERNEL); 2235be4c9badSRoland Dreier c4iw_put_ep(&ep->com); 2236be4c9badSRoland Dreier } 2237be4c9badSRoland Dreier 2238be4c9badSRoland Dreier static void process_timedout_eps(void) 2239be4c9badSRoland Dreier { 2240be4c9badSRoland Dreier struct c4iw_ep *ep; 2241be4c9badSRoland Dreier 2242be4c9badSRoland Dreier spin_lock_irq(&timeout_lock); 2243be4c9badSRoland Dreier while (!list_empty(&timeout_list)) { 2244be4c9badSRoland Dreier struct list_head *tmp; 2245be4c9badSRoland Dreier 2246be4c9badSRoland Dreier tmp = timeout_list.next; 
2247be4c9badSRoland Dreier list_del(tmp); 2248be4c9badSRoland Dreier spin_unlock_irq(&timeout_lock); 2249be4c9badSRoland Dreier ep = list_entry(tmp, struct c4iw_ep, entry); 2250be4c9badSRoland Dreier process_timeout(ep); 2251be4c9badSRoland Dreier spin_lock_irq(&timeout_lock); 2252be4c9badSRoland Dreier } 2253be4c9badSRoland Dreier spin_unlock_irq(&timeout_lock); 2254be4c9badSRoland Dreier } 2255be4c9badSRoland Dreier 2256be4c9badSRoland Dreier static void process_work(struct work_struct *work) 2257be4c9badSRoland Dreier { 2258be4c9badSRoland Dreier struct sk_buff *skb = NULL; 2259be4c9badSRoland Dreier struct c4iw_dev *dev; 2260c1d7356cSDan Carpenter struct cpl_act_establish *rpl; 2261be4c9badSRoland Dreier unsigned int opcode; 2262be4c9badSRoland Dreier int ret; 2263be4c9badSRoland Dreier 2264be4c9badSRoland Dreier while ((skb = skb_dequeue(&rxq))) { 2265be4c9badSRoland Dreier rpl = cplhdr(skb); 2266be4c9badSRoland Dreier dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); 2267be4c9badSRoland Dreier opcode = rpl->ot.opcode; 2268be4c9badSRoland Dreier 2269be4c9badSRoland Dreier BUG_ON(!work_handlers[opcode]); 2270be4c9badSRoland Dreier ret = work_handlers[opcode](dev, skb); 2271be4c9badSRoland Dreier if (!ret) 2272be4c9badSRoland Dreier kfree_skb(skb); 2273be4c9badSRoland Dreier } 2274be4c9badSRoland Dreier process_timedout_eps(); 2275be4c9badSRoland Dreier } 2276be4c9badSRoland Dreier 2277be4c9badSRoland Dreier static DECLARE_WORK(skb_work, process_work); 2278be4c9badSRoland Dreier 2279be4c9badSRoland Dreier static void ep_timeout(unsigned long arg) 2280be4c9badSRoland Dreier { 2281be4c9badSRoland Dreier struct c4iw_ep *ep = (struct c4iw_ep *)arg; 2282be4c9badSRoland Dreier 2283be4c9badSRoland Dreier spin_lock(&timeout_lock); 2284be4c9badSRoland Dreier list_add_tail(&ep->entry, &timeout_list); 2285be4c9badSRoland Dreier spin_unlock(&timeout_lock); 2286be4c9badSRoland Dreier queue_work(workq, &skb_work); 2287be4c9badSRoland Dreier } 2288be4c9badSRoland 
Dreier 2289be4c9badSRoland Dreier /* 2290cfdda9d7SSteve Wise * All the CM events are handled on a work queue to have a safe context. 2291cfdda9d7SSteve Wise */ 2292cfdda9d7SSteve Wise static int sched(struct c4iw_dev *dev, struct sk_buff *skb) 2293cfdda9d7SSteve Wise { 2294cfdda9d7SSteve Wise 2295cfdda9d7SSteve Wise /* 2296cfdda9d7SSteve Wise * Save dev in the skb->cb area. 2297cfdda9d7SSteve Wise */ 2298cfdda9d7SSteve Wise *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev; 2299cfdda9d7SSteve Wise 2300cfdda9d7SSteve Wise /* 2301cfdda9d7SSteve Wise * Queue the skb and schedule the worker thread. 2302cfdda9d7SSteve Wise */ 2303cfdda9d7SSteve Wise skb_queue_tail(&rxq, skb); 2304cfdda9d7SSteve Wise queue_work(workq, &skb_work); 2305cfdda9d7SSteve Wise return 0; 2306cfdda9d7SSteve Wise } 2307cfdda9d7SSteve Wise 2308cfdda9d7SSteve Wise static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2309cfdda9d7SSteve Wise { 2310cfdda9d7SSteve Wise struct cpl_set_tcb_rpl *rpl = cplhdr(skb); 2311cfdda9d7SSteve Wise 2312cfdda9d7SSteve Wise if (rpl->status != CPL_ERR_NONE) { 2313cfdda9d7SSteve Wise printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u " 2314cfdda9d7SSteve Wise "for tid %u\n", rpl->status, GET_TID(rpl)); 2315cfdda9d7SSteve Wise } 2316cfdda9d7SSteve Wise return 0; 2317cfdda9d7SSteve Wise } 2318cfdda9d7SSteve Wise 2319be4c9badSRoland Dreier static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) 2320be4c9badSRoland Dreier { 2321be4c9badSRoland Dreier struct cpl_fw6_msg *rpl = cplhdr(skb); 2322be4c9badSRoland Dreier struct c4iw_wr_wait *wr_waitp; 2323be4c9badSRoland Dreier int ret; 2324be4c9badSRoland Dreier 2325be4c9badSRoland Dreier PDBG("%s type %u\n", __func__, rpl->type); 2326be4c9badSRoland Dreier 2327be4c9badSRoland Dreier switch (rpl->type) { 2328be4c9badSRoland Dreier case 1: 2329be4c9badSRoland Dreier ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); 2330be4c9badSRoland Dreier wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1]; 
2331be4c9badSRoland Dreier PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); 2332be4c9badSRoland Dreier if (wr_waitp) { 2333be4c9badSRoland Dreier wr_waitp->ret = ret; 2334be4c9badSRoland Dreier wr_waitp->done = 1; 2335be4c9badSRoland Dreier wake_up(&wr_waitp->wait); 2336be4c9badSRoland Dreier } 2337be4c9badSRoland Dreier break; 2338be4c9badSRoland Dreier case 2: 2339be4c9badSRoland Dreier c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); 2340be4c9badSRoland Dreier break; 2341be4c9badSRoland Dreier default: 2342be4c9badSRoland Dreier printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__, 2343be4c9badSRoland Dreier rpl->type); 2344be4c9badSRoland Dreier break; 2345be4c9badSRoland Dreier } 2346be4c9badSRoland Dreier return 0; 2347be4c9badSRoland Dreier } 2348be4c9badSRoland Dreier 2349be4c9badSRoland Dreier /* 2350be4c9badSRoland Dreier * Most upcalls from the T4 Core go to sched() to 2351be4c9badSRoland Dreier * schedule the processing on a work queue. 2352be4c9badSRoland Dreier */ 2353be4c9badSRoland Dreier c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = { 2354be4c9badSRoland Dreier [CPL_ACT_ESTABLISH] = sched, 2355be4c9badSRoland Dreier [CPL_ACT_OPEN_RPL] = sched, 2356be4c9badSRoland Dreier [CPL_RX_DATA] = sched, 2357be4c9badSRoland Dreier [CPL_ABORT_RPL_RSS] = sched, 2358be4c9badSRoland Dreier [CPL_ABORT_RPL] = sched, 2359be4c9badSRoland Dreier [CPL_PASS_OPEN_RPL] = sched, 2360be4c9badSRoland Dreier [CPL_CLOSE_LISTSRV_RPL] = sched, 2361be4c9badSRoland Dreier [CPL_PASS_ACCEPT_REQ] = sched, 2362be4c9badSRoland Dreier [CPL_PASS_ESTABLISH] = sched, 2363be4c9badSRoland Dreier [CPL_PEER_CLOSE] = sched, 2364be4c9badSRoland Dreier [CPL_CLOSE_CON_RPL] = sched, 2365be4c9badSRoland Dreier [CPL_ABORT_REQ_RSS] = sched, 2366be4c9badSRoland Dreier [CPL_RDMA_TERMINATE] = sched, 2367be4c9badSRoland Dreier [CPL_FW4_ACK] = sched, 2368be4c9badSRoland Dreier [CPL_SET_TCB_RPL] = set_tcb_rpl, 2369be4c9badSRoland Dreier [CPL_FW6_MSG] = fw6_msg 2370be4c9badSRoland 
Dreier }; 2371be4c9badSRoland Dreier 2372cfdda9d7SSteve Wise int __init c4iw_cm_init(void) 2373cfdda9d7SSteve Wise { 2374be4c9badSRoland Dreier spin_lock_init(&timeout_lock); 2375cfdda9d7SSteve Wise skb_queue_head_init(&rxq); 2376cfdda9d7SSteve Wise 2377cfdda9d7SSteve Wise workq = create_singlethread_workqueue("iw_cxgb4"); 2378cfdda9d7SSteve Wise if (!workq) 2379cfdda9d7SSteve Wise return -ENOMEM; 2380cfdda9d7SSteve Wise 2381cfdda9d7SSteve Wise return 0; 2382cfdda9d7SSteve Wise } 2383cfdda9d7SSteve Wise 2384cfdda9d7SSteve Wise void __exit c4iw_cm_term(void) 2385cfdda9d7SSteve Wise { 2386be4c9badSRoland Dreier WARN_ON(!list_empty(&timeout_list)); 2387cfdda9d7SSteve Wise flush_workqueue(workq); 2388cfdda9d7SSteve Wise destroy_workqueue(workq); 2389cfdda9d7SSteve Wise } 2390