1cfdda9d7SSteve Wise /* 2cfdda9d7SSteve Wise * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. 3cfdda9d7SSteve Wise * 4cfdda9d7SSteve Wise * This software is available to you under a choice of one of two 5cfdda9d7SSteve Wise * licenses. You may choose to be licensed under the terms of the GNU 6cfdda9d7SSteve Wise * General Public License (GPL) Version 2, available from the file 7cfdda9d7SSteve Wise * COPYING in the main directory of this source tree, or the 8cfdda9d7SSteve Wise * OpenIB.org BSD license below: 9cfdda9d7SSteve Wise * 10cfdda9d7SSteve Wise * Redistribution and use in source and binary forms, with or 11cfdda9d7SSteve Wise * without modification, are permitted provided that the following 12cfdda9d7SSteve Wise * conditions are met: 13cfdda9d7SSteve Wise * 14cfdda9d7SSteve Wise * - Redistributions of source code must retain the above 15cfdda9d7SSteve Wise * copyright notice, this list of conditions and the following 16cfdda9d7SSteve Wise * disclaimer. 17cfdda9d7SSteve Wise * 18cfdda9d7SSteve Wise * - Redistributions in binary form must reproduce the above 19cfdda9d7SSteve Wise * copyright notice, this list of conditions and the following 20cfdda9d7SSteve Wise * disclaimer in the documentation and/or other materials 21cfdda9d7SSteve Wise * provided with the distribution. 22cfdda9d7SSteve Wise * 23cfdda9d7SSteve Wise * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24cfdda9d7SSteve Wise * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25cfdda9d7SSteve Wise * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26cfdda9d7SSteve Wise * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27cfdda9d7SSteve Wise * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28cfdda9d7SSteve Wise * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29cfdda9d7SSteve Wise * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30cfdda9d7SSteve Wise * SOFTWARE. 
31cfdda9d7SSteve Wise */ 32cfdda9d7SSteve Wise #include <linux/module.h> 33cfdda9d7SSteve Wise #include <linux/list.h> 34cfdda9d7SSteve Wise #include <linux/workqueue.h> 35cfdda9d7SSteve Wise #include <linux/skbuff.h> 36cfdda9d7SSteve Wise #include <linux/timer.h> 37cfdda9d7SSteve Wise #include <linux/notifier.h> 38cfdda9d7SSteve Wise #include <linux/inetdevice.h> 39cfdda9d7SSteve Wise #include <linux/ip.h> 40cfdda9d7SSteve Wise #include <linux/tcp.h> 411cab775cSVipul Pandya #include <linux/if_vlan.h> 42cfdda9d7SSteve Wise 43cfdda9d7SSteve Wise #include <net/neighbour.h> 44cfdda9d7SSteve Wise #include <net/netevent.h> 45cfdda9d7SSteve Wise #include <net/route.h> 461cab775cSVipul Pandya #include <net/tcp.h> 47cfdda9d7SSteve Wise 48cfdda9d7SSteve Wise #include "iw_cxgb4.h" 49cfdda9d7SSteve Wise 50cfdda9d7SSteve Wise static char *states[] = { 51cfdda9d7SSteve Wise "idle", 52cfdda9d7SSteve Wise "listen", 53cfdda9d7SSteve Wise "connecting", 54cfdda9d7SSteve Wise "mpa_wait_req", 55cfdda9d7SSteve Wise "mpa_req_sent", 56cfdda9d7SSteve Wise "mpa_req_rcvd", 57cfdda9d7SSteve Wise "mpa_rep_sent", 58cfdda9d7SSteve Wise "fpdu_mode", 59cfdda9d7SSteve Wise "aborting", 60cfdda9d7SSteve Wise "closing", 61cfdda9d7SSteve Wise "moribund", 62cfdda9d7SSteve Wise "dead", 63cfdda9d7SSteve Wise NULL, 64cfdda9d7SSteve Wise }; 65cfdda9d7SSteve Wise 665be78ee9SVipul Pandya static int nocong; 675be78ee9SVipul Pandya module_param(nocong, int, 0644); 685be78ee9SVipul Pandya MODULE_PARM_DESC(nocong, "Turn of congestion control (default=0)"); 695be78ee9SVipul Pandya 705be78ee9SVipul Pandya static int enable_ecn; 715be78ee9SVipul Pandya module_param(enable_ecn, int, 0644); 725be78ee9SVipul Pandya MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)"); 735be78ee9SVipul Pandya 74b52fe09eSSteve Wise static int dack_mode = 1; 75ba6d3925SSteve Wise module_param(dack_mode, int, 0644); 76b52fe09eSSteve Wise MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)"); 77ba6d3925SSteve Wise 
78be4c9badSRoland Dreier int c4iw_max_read_depth = 8; 79be4c9badSRoland Dreier module_param(c4iw_max_read_depth, int, 0644); 80be4c9badSRoland Dreier MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)"); 81be4c9badSRoland Dreier 82cfdda9d7SSteve Wise static int enable_tcp_timestamps; 83cfdda9d7SSteve Wise module_param(enable_tcp_timestamps, int, 0644); 84cfdda9d7SSteve Wise MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)"); 85cfdda9d7SSteve Wise 86cfdda9d7SSteve Wise static int enable_tcp_sack; 87cfdda9d7SSteve Wise module_param(enable_tcp_sack, int, 0644); 88cfdda9d7SSteve Wise MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)"); 89cfdda9d7SSteve Wise 90cfdda9d7SSteve Wise static int enable_tcp_window_scaling = 1; 91cfdda9d7SSteve Wise module_param(enable_tcp_window_scaling, int, 0644); 92cfdda9d7SSteve Wise MODULE_PARM_DESC(enable_tcp_window_scaling, 93cfdda9d7SSteve Wise "Enable tcp window scaling (default=1)"); 94cfdda9d7SSteve Wise 95cfdda9d7SSteve Wise int c4iw_debug; 96cfdda9d7SSteve Wise module_param(c4iw_debug, int, 0644); 97cfdda9d7SSteve Wise MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)"); 98cfdda9d7SSteve Wise 99cfdda9d7SSteve Wise static int peer2peer; 100cfdda9d7SSteve Wise module_param(peer2peer, int, 0644); 101cfdda9d7SSteve Wise MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)"); 102cfdda9d7SSteve Wise 103cfdda9d7SSteve Wise static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; 104cfdda9d7SSteve Wise module_param(p2p_type, int, 0644); 105cfdda9d7SSteve Wise MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: " 106cfdda9d7SSteve Wise "1=RDMA_READ 0=RDMA_WRITE (default 1)"); 107cfdda9d7SSteve Wise 108cfdda9d7SSteve Wise static int ep_timeout_secs = 60; 109cfdda9d7SSteve Wise module_param(ep_timeout_secs, int, 0644); 110cfdda9d7SSteve Wise MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout " 111cfdda9d7SSteve Wise "in seconds 
(default=60)"); 112cfdda9d7SSteve Wise 113cfdda9d7SSteve Wise static int mpa_rev = 1; 114cfdda9d7SSteve Wise module_param(mpa_rev, int, 0644); 115cfdda9d7SSteve Wise MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, " 116d2fe99e8SKumar Sanghvi "1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft" 117d2fe99e8SKumar Sanghvi " compliant (default=1)"); 118cfdda9d7SSteve Wise 119cfdda9d7SSteve Wise static int markers_enabled; 120cfdda9d7SSteve Wise module_param(markers_enabled, int, 0644); 121cfdda9d7SSteve Wise MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)"); 122cfdda9d7SSteve Wise 123cfdda9d7SSteve Wise static int crc_enabled = 1; 124cfdda9d7SSteve Wise module_param(crc_enabled, int, 0644); 125cfdda9d7SSteve Wise MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)"); 126cfdda9d7SSteve Wise 127cfdda9d7SSteve Wise static int rcv_win = 256 * 1024; 128cfdda9d7SSteve Wise module_param(rcv_win, int, 0644); 129cfdda9d7SSteve Wise MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)"); 130cfdda9d7SSteve Wise 13198ae68b7SSteve Wise static int snd_win = 128 * 1024; 132cfdda9d7SSteve Wise module_param(snd_win, int, 0644); 13398ae68b7SSteve Wise MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)"); 134cfdda9d7SSteve Wise 135cfdda9d7SSteve Wise static struct workqueue_struct *workq; 136cfdda9d7SSteve Wise 137cfdda9d7SSteve Wise static struct sk_buff_head rxq; 138cfdda9d7SSteve Wise 139cfdda9d7SSteve Wise static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp); 140cfdda9d7SSteve Wise static void ep_timeout(unsigned long arg); 141cfdda9d7SSteve Wise static void connect_reply_upcall(struct c4iw_ep *ep, int status); 142cfdda9d7SSteve Wise 143be4c9badSRoland Dreier static LIST_HEAD(timeout_list); 144be4c9badSRoland Dreier static spinlock_t timeout_lock; 145be4c9badSRoland Dreier 146cfdda9d7SSteve Wise static void start_ep_timer(struct c4iw_ep *ep) 147cfdda9d7SSteve Wise 
{ 148cfdda9d7SSteve Wise PDBG("%s ep %p\n", __func__, ep); 149cfdda9d7SSteve Wise if (timer_pending(&ep->timer)) { 150cfdda9d7SSteve Wise PDBG("%s stopped / restarted timer ep %p\n", __func__, ep); 151cfdda9d7SSteve Wise del_timer_sync(&ep->timer); 152cfdda9d7SSteve Wise } else 153cfdda9d7SSteve Wise c4iw_get_ep(&ep->com); 154cfdda9d7SSteve Wise ep->timer.expires = jiffies + ep_timeout_secs * HZ; 155cfdda9d7SSteve Wise ep->timer.data = (unsigned long)ep; 156cfdda9d7SSteve Wise ep->timer.function = ep_timeout; 157cfdda9d7SSteve Wise add_timer(&ep->timer); 158cfdda9d7SSteve Wise } 159cfdda9d7SSteve Wise 160cfdda9d7SSteve Wise static void stop_ep_timer(struct c4iw_ep *ep) 161cfdda9d7SSteve Wise { 162cfdda9d7SSteve Wise PDBG("%s ep %p\n", __func__, ep); 163cfdda9d7SSteve Wise if (!timer_pending(&ep->timer)) { 16476f267b7SJulia Lawall WARN(1, "%s timer stopped when its not running! " 165cfdda9d7SSteve Wise "ep %p state %u\n", __func__, ep, ep->com.state); 166cfdda9d7SSteve Wise return; 167cfdda9d7SSteve Wise } 168cfdda9d7SSteve Wise del_timer_sync(&ep->timer); 169cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 170cfdda9d7SSteve Wise } 171cfdda9d7SSteve Wise 172cfdda9d7SSteve Wise static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb, 173cfdda9d7SSteve Wise struct l2t_entry *l2e) 174cfdda9d7SSteve Wise { 175cfdda9d7SSteve Wise int error = 0; 176cfdda9d7SSteve Wise 177cfdda9d7SSteve Wise if (c4iw_fatal_error(rdev)) { 178cfdda9d7SSteve Wise kfree_skb(skb); 179cfdda9d7SSteve Wise PDBG("%s - device in error state - dropping\n", __func__); 180cfdda9d7SSteve Wise return -EIO; 181cfdda9d7SSteve Wise } 182cfdda9d7SSteve Wise error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e); 183cfdda9d7SSteve Wise if (error < 0) 184cfdda9d7SSteve Wise kfree_skb(skb); 18574594861SSteve Wise return error < 0 ? 
error : 0; 186cfdda9d7SSteve Wise } 187cfdda9d7SSteve Wise 188cfdda9d7SSteve Wise int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb) 189cfdda9d7SSteve Wise { 190cfdda9d7SSteve Wise int error = 0; 191cfdda9d7SSteve Wise 192cfdda9d7SSteve Wise if (c4iw_fatal_error(rdev)) { 193cfdda9d7SSteve Wise kfree_skb(skb); 194cfdda9d7SSteve Wise PDBG("%s - device in error state - dropping\n", __func__); 195cfdda9d7SSteve Wise return -EIO; 196cfdda9d7SSteve Wise } 197cfdda9d7SSteve Wise error = cxgb4_ofld_send(rdev->lldi.ports[0], skb); 198cfdda9d7SSteve Wise if (error < 0) 199cfdda9d7SSteve Wise kfree_skb(skb); 20074594861SSteve Wise return error < 0 ? error : 0; 201cfdda9d7SSteve Wise } 202cfdda9d7SSteve Wise 203cfdda9d7SSteve Wise static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb) 204cfdda9d7SSteve Wise { 205cfdda9d7SSteve Wise struct cpl_tid_release *req; 206cfdda9d7SSteve Wise 207cfdda9d7SSteve Wise skb = get_skb(skb, sizeof *req, GFP_KERNEL); 208cfdda9d7SSteve Wise if (!skb) 209cfdda9d7SSteve Wise return; 210cfdda9d7SSteve Wise req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req)); 211cfdda9d7SSteve Wise INIT_TP_WR(req, hwtid); 212cfdda9d7SSteve Wise OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); 213cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); 214cfdda9d7SSteve Wise c4iw_ofld_send(rdev, skb); 215cfdda9d7SSteve Wise return; 216cfdda9d7SSteve Wise } 217cfdda9d7SSteve Wise 218cfdda9d7SSteve Wise static void set_emss(struct c4iw_ep *ep, u16 opt) 219cfdda9d7SSteve Wise { 220cfdda9d7SSteve Wise ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40; 221cfdda9d7SSteve Wise ep->mss = ep->emss; 222cfdda9d7SSteve Wise if (GET_TCPOPT_TSTAMP(opt)) 223cfdda9d7SSteve Wise ep->emss -= 12; 224cfdda9d7SSteve Wise if (ep->emss < 128) 225cfdda9d7SSteve Wise ep->emss = 128; 226cfdda9d7SSteve Wise PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt), 227cfdda9d7SSteve Wise ep->mss, 
ep->emss); 228cfdda9d7SSteve Wise } 229cfdda9d7SSteve Wise 230cfdda9d7SSteve Wise static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc) 231cfdda9d7SSteve Wise { 232cfdda9d7SSteve Wise enum c4iw_ep_state state; 233cfdda9d7SSteve Wise 2342f5b48c3SSteve Wise mutex_lock(&epc->mutex); 235cfdda9d7SSteve Wise state = epc->state; 2362f5b48c3SSteve Wise mutex_unlock(&epc->mutex); 237cfdda9d7SSteve Wise return state; 238cfdda9d7SSteve Wise } 239cfdda9d7SSteve Wise 240cfdda9d7SSteve Wise static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) 241cfdda9d7SSteve Wise { 242cfdda9d7SSteve Wise epc->state = new; 243cfdda9d7SSteve Wise } 244cfdda9d7SSteve Wise 245cfdda9d7SSteve Wise static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) 246cfdda9d7SSteve Wise { 2472f5b48c3SSteve Wise mutex_lock(&epc->mutex); 248cfdda9d7SSteve Wise PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]); 249cfdda9d7SSteve Wise __state_set(epc, new); 2502f5b48c3SSteve Wise mutex_unlock(&epc->mutex); 251cfdda9d7SSteve Wise return; 252cfdda9d7SSteve Wise } 253cfdda9d7SSteve Wise 254cfdda9d7SSteve Wise static void *alloc_ep(int size, gfp_t gfp) 255cfdda9d7SSteve Wise { 256cfdda9d7SSteve Wise struct c4iw_ep_common *epc; 257cfdda9d7SSteve Wise 258cfdda9d7SSteve Wise epc = kzalloc(size, gfp); 259cfdda9d7SSteve Wise if (epc) { 260cfdda9d7SSteve Wise kref_init(&epc->kref); 2612f5b48c3SSteve Wise mutex_init(&epc->mutex); 262aadc4df3SSteve Wise c4iw_init_wr_wait(&epc->wr_wait); 263cfdda9d7SSteve Wise } 264cfdda9d7SSteve Wise PDBG("%s alloc ep %p\n", __func__, epc); 265cfdda9d7SSteve Wise return epc; 266cfdda9d7SSteve Wise } 267cfdda9d7SSteve Wise 268cfdda9d7SSteve Wise void _c4iw_free_ep(struct kref *kref) 269cfdda9d7SSteve Wise { 270cfdda9d7SSteve Wise struct c4iw_ep *ep; 271cfdda9d7SSteve Wise 272cfdda9d7SSteve Wise ep = container_of(kref, struct c4iw_ep, com.kref); 273cfdda9d7SSteve Wise PDBG("%s ep %p state %s\n", __func__, ep, 
states[state_read(&ep->com)]); 274cfdda9d7SSteve Wise if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { 275cfdda9d7SSteve Wise cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); 276cfdda9d7SSteve Wise dst_release(ep->dst); 277cfdda9d7SSteve Wise cxgb4_l2t_release(ep->l2t); 278793dad94SVipul Pandya remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid); 279cfdda9d7SSteve Wise } 280cfdda9d7SSteve Wise kfree(ep); 281cfdda9d7SSteve Wise } 282cfdda9d7SSteve Wise 283cfdda9d7SSteve Wise static void release_ep_resources(struct c4iw_ep *ep) 284cfdda9d7SSteve Wise { 285cfdda9d7SSteve Wise set_bit(RELEASE_RESOURCES, &ep->com.flags); 286cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 287cfdda9d7SSteve Wise } 288cfdda9d7SSteve Wise 289cfdda9d7SSteve Wise static int status2errno(int status) 290cfdda9d7SSteve Wise { 291cfdda9d7SSteve Wise switch (status) { 292cfdda9d7SSteve Wise case CPL_ERR_NONE: 293cfdda9d7SSteve Wise return 0; 294cfdda9d7SSteve Wise case CPL_ERR_CONN_RESET: 295cfdda9d7SSteve Wise return -ECONNRESET; 296cfdda9d7SSteve Wise case CPL_ERR_ARP_MISS: 297cfdda9d7SSteve Wise return -EHOSTUNREACH; 298cfdda9d7SSteve Wise case CPL_ERR_CONN_TIMEDOUT: 299cfdda9d7SSteve Wise return -ETIMEDOUT; 300cfdda9d7SSteve Wise case CPL_ERR_TCAM_FULL: 301cfdda9d7SSteve Wise return -ENOMEM; 302cfdda9d7SSteve Wise case CPL_ERR_CONN_EXIST: 303cfdda9d7SSteve Wise return -EADDRINUSE; 304cfdda9d7SSteve Wise default: 305cfdda9d7SSteve Wise return -EIO; 306cfdda9d7SSteve Wise } 307cfdda9d7SSteve Wise } 308cfdda9d7SSteve Wise 309cfdda9d7SSteve Wise /* 310cfdda9d7SSteve Wise * Try and reuse skbs already allocated... 
311cfdda9d7SSteve Wise */ 312cfdda9d7SSteve Wise static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp) 313cfdda9d7SSteve Wise { 314cfdda9d7SSteve Wise if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) { 315cfdda9d7SSteve Wise skb_trim(skb, 0); 316cfdda9d7SSteve Wise skb_get(skb); 317cfdda9d7SSteve Wise skb_reset_transport_header(skb); 318cfdda9d7SSteve Wise } else { 319cfdda9d7SSteve Wise skb = alloc_skb(len, gfp); 320cfdda9d7SSteve Wise } 321cfdda9d7SSteve Wise return skb; 322cfdda9d7SSteve Wise } 323cfdda9d7SSteve Wise 324cfdda9d7SSteve Wise static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip, 325cfdda9d7SSteve Wise __be32 peer_ip, __be16 local_port, 326cfdda9d7SSteve Wise __be16 peer_port, u8 tos) 327cfdda9d7SSteve Wise { 328cfdda9d7SSteve Wise struct rtable *rt; 32931e4543dSDavid S. Miller struct flowi4 fl4; 330cfdda9d7SSteve Wise 33131e4543dSDavid S. Miller rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip, 33278fbfd8aSDavid S. Miller peer_port, local_port, IPPROTO_TCP, 33378fbfd8aSDavid S. Miller tos, 0); 334b23dd4feSDavid S. Miller if (IS_ERR(rt)) 335cfdda9d7SSteve Wise return NULL; 336cfdda9d7SSteve Wise return rt; 337cfdda9d7SSteve Wise } 338cfdda9d7SSteve Wise 339cfdda9d7SSteve Wise static void arp_failure_discard(void *handle, struct sk_buff *skb) 340cfdda9d7SSteve Wise { 341cfdda9d7SSteve Wise PDBG("%s c4iw_dev %p\n", __func__, handle); 342cfdda9d7SSteve Wise kfree_skb(skb); 343cfdda9d7SSteve Wise } 344cfdda9d7SSteve Wise 345cfdda9d7SSteve Wise /* 346cfdda9d7SSteve Wise * Handle an ARP failure for an active open. 
347cfdda9d7SSteve Wise */ 348cfdda9d7SSteve Wise static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) 349cfdda9d7SSteve Wise { 350cfdda9d7SSteve Wise printk(KERN_ERR MOD "ARP failure duing connect\n"); 351cfdda9d7SSteve Wise kfree_skb(skb); 352cfdda9d7SSteve Wise } 353cfdda9d7SSteve Wise 354cfdda9d7SSteve Wise /* 355cfdda9d7SSteve Wise * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant 356cfdda9d7SSteve Wise * and send it along. 357cfdda9d7SSteve Wise */ 358cfdda9d7SSteve Wise static void abort_arp_failure(void *handle, struct sk_buff *skb) 359cfdda9d7SSteve Wise { 360cfdda9d7SSteve Wise struct c4iw_rdev *rdev = handle; 361cfdda9d7SSteve Wise struct cpl_abort_req *req = cplhdr(skb); 362cfdda9d7SSteve Wise 363cfdda9d7SSteve Wise PDBG("%s rdev %p\n", __func__, rdev); 364cfdda9d7SSteve Wise req->cmd = CPL_ABORT_NO_RST; 365cfdda9d7SSteve Wise c4iw_ofld_send(rdev, skb); 366cfdda9d7SSteve Wise } 367cfdda9d7SSteve Wise 368cfdda9d7SSteve Wise static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) 369cfdda9d7SSteve Wise { 370cfdda9d7SSteve Wise unsigned int flowclen = 80; 371cfdda9d7SSteve Wise struct fw_flowc_wr *flowc; 372cfdda9d7SSteve Wise int i; 373cfdda9d7SSteve Wise 374cfdda9d7SSteve Wise skb = get_skb(skb, flowclen, GFP_KERNEL); 375cfdda9d7SSteve Wise flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen); 376cfdda9d7SSteve Wise 377cfdda9d7SSteve Wise flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) | 378cfdda9d7SSteve Wise FW_FLOWC_WR_NPARAMS(8)); 379cfdda9d7SSteve Wise flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen, 380cfdda9d7SSteve Wise 16)) | FW_WR_FLOWID(ep->hwtid)); 381cfdda9d7SSteve Wise 382cfdda9d7SSteve Wise flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 38394788657SSteve Wise flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8); 384cfdda9d7SSteve Wise flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 385cfdda9d7SSteve Wise 
flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); 386cfdda9d7SSteve Wise flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 387cfdda9d7SSteve Wise flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan); 388cfdda9d7SSteve Wise flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 389cfdda9d7SSteve Wise flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid); 390cfdda9d7SSteve Wise flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; 391cfdda9d7SSteve Wise flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq); 392cfdda9d7SSteve Wise flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; 393cfdda9d7SSteve Wise flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq); 394cfdda9d7SSteve Wise flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; 395cfdda9d7SSteve Wise flowc->mnemval[6].val = cpu_to_be32(snd_win); 396cfdda9d7SSteve Wise flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; 397cfdda9d7SSteve Wise flowc->mnemval[7].val = cpu_to_be32(ep->emss); 398cfdda9d7SSteve Wise /* Pad WR to 16 byte boundary */ 399cfdda9d7SSteve Wise flowc->mnemval[8].mnemonic = 0; 400cfdda9d7SSteve Wise flowc->mnemval[8].val = 0; 401cfdda9d7SSteve Wise for (i = 0; i < 9; i++) { 402cfdda9d7SSteve Wise flowc->mnemval[i].r4[0] = 0; 403cfdda9d7SSteve Wise flowc->mnemval[i].r4[1] = 0; 404cfdda9d7SSteve Wise flowc->mnemval[i].r4[2] = 0; 405cfdda9d7SSteve Wise } 406cfdda9d7SSteve Wise 407cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 408cfdda9d7SSteve Wise c4iw_ofld_send(&ep->com.dev->rdev, skb); 409cfdda9d7SSteve Wise } 410cfdda9d7SSteve Wise 411cfdda9d7SSteve Wise static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp) 412cfdda9d7SSteve Wise { 413cfdda9d7SSteve Wise struct cpl_close_con_req *req; 414cfdda9d7SSteve Wise struct sk_buff *skb; 415cfdda9d7SSteve Wise int wrlen = roundup(sizeof *req, 16); 416cfdda9d7SSteve Wise 417cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 418cfdda9d7SSteve Wise skb = get_skb(NULL, wrlen, gfp); 419cfdda9d7SSteve Wise if (!skb) { 420cfdda9d7SSteve Wise 
printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); 421cfdda9d7SSteve Wise return -ENOMEM; 422cfdda9d7SSteve Wise } 423cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 424cfdda9d7SSteve Wise t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 425cfdda9d7SSteve Wise req = (struct cpl_close_con_req *) skb_put(skb, wrlen); 426cfdda9d7SSteve Wise memset(req, 0, wrlen); 427cfdda9d7SSteve Wise INIT_TP_WR(req, ep->hwtid); 428cfdda9d7SSteve Wise OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, 429cfdda9d7SSteve Wise ep->hwtid)); 430cfdda9d7SSteve Wise return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 431cfdda9d7SSteve Wise } 432cfdda9d7SSteve Wise 433cfdda9d7SSteve Wise static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) 434cfdda9d7SSteve Wise { 435cfdda9d7SSteve Wise struct cpl_abort_req *req; 436cfdda9d7SSteve Wise int wrlen = roundup(sizeof *req, 16); 437cfdda9d7SSteve Wise 438cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 439cfdda9d7SSteve Wise skb = get_skb(skb, wrlen, gfp); 440cfdda9d7SSteve Wise if (!skb) { 441cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 442cfdda9d7SSteve Wise __func__); 443cfdda9d7SSteve Wise return -ENOMEM; 444cfdda9d7SSteve Wise } 445cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 446cfdda9d7SSteve Wise t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure); 447cfdda9d7SSteve Wise req = (struct cpl_abort_req *) skb_put(skb, wrlen); 448cfdda9d7SSteve Wise memset(req, 0, wrlen); 449cfdda9d7SSteve Wise INIT_TP_WR(req, ep->hwtid); 450cfdda9d7SSteve Wise OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); 451cfdda9d7SSteve Wise req->cmd = CPL_ABORT_SEND_RST; 452cfdda9d7SSteve Wise return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 453cfdda9d7SSteve Wise } 454cfdda9d7SSteve Wise 4555be78ee9SVipul Pandya #define VLAN_NONE 0xfff 4565be78ee9SVipul Pandya #define 
FILTER_SEL_VLAN_NONE 0xffff 4575be78ee9SVipul Pandya #define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */ 4585be78ee9SVipul Pandya #define FILTER_SEL_WIDTH_VIN_P_FC \ 4595be78ee9SVipul Pandya (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/ 4605be78ee9SVipul Pandya #define FILTER_SEL_WIDTH_TAG_P_FC \ 4615be78ee9SVipul Pandya (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */ 4625be78ee9SVipul Pandya #define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC) 4635be78ee9SVipul Pandya 4645be78ee9SVipul Pandya static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst, 4655be78ee9SVipul Pandya struct l2t_entry *l2t) 4665be78ee9SVipul Pandya { 4675be78ee9SVipul Pandya unsigned int ntuple = 0; 4685be78ee9SVipul Pandya u32 viid; 4695be78ee9SVipul Pandya 4705be78ee9SVipul Pandya switch (dev->rdev.lldi.filt_mode) { 4715be78ee9SVipul Pandya 4725be78ee9SVipul Pandya /* default filter mode */ 4735be78ee9SVipul Pandya case HW_TPL_FR_MT_PR_IV_P_FC: 4745be78ee9SVipul Pandya if (l2t->vlan == VLAN_NONE) 4755be78ee9SVipul Pandya ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC; 4765be78ee9SVipul Pandya else { 4775be78ee9SVipul Pandya ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC; 4785be78ee9SVipul Pandya ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC; 4795be78ee9SVipul Pandya } 4805be78ee9SVipul Pandya ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << 4815be78ee9SVipul Pandya FILTER_SEL_WIDTH_VLD_TAG_P_FC; 4825be78ee9SVipul Pandya break; 4835be78ee9SVipul Pandya case HW_TPL_FR_MT_PR_OV_P_FC: { 4845be78ee9SVipul Pandya viid = cxgb4_port_viid(l2t->neigh->dev); 4855be78ee9SVipul Pandya 4865be78ee9SVipul Pandya ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC; 4875be78ee9SVipul Pandya ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC; 4885be78ee9SVipul Pandya ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC; 4895be78ee9SVipul Pandya ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << 
4905be78ee9SVipul Pandya FILTER_SEL_WIDTH_VLD_TAG_P_FC; 4915be78ee9SVipul Pandya break; 4925be78ee9SVipul Pandya } 4935be78ee9SVipul Pandya default: 4945be78ee9SVipul Pandya break; 4955be78ee9SVipul Pandya } 4965be78ee9SVipul Pandya return ntuple; 4975be78ee9SVipul Pandya } 4985be78ee9SVipul Pandya 499cfdda9d7SSteve Wise static int send_connect(struct c4iw_ep *ep) 500cfdda9d7SSteve Wise { 501cfdda9d7SSteve Wise struct cpl_act_open_req *req; 502cfdda9d7SSteve Wise struct sk_buff *skb; 503cfdda9d7SSteve Wise u64 opt0; 504cfdda9d7SSteve Wise u32 opt2; 505cfdda9d7SSteve Wise unsigned int mtu_idx; 506cfdda9d7SSteve Wise int wscale; 507cfdda9d7SSteve Wise int wrlen = roundup(sizeof *req, 16); 508cfdda9d7SSteve Wise 509cfdda9d7SSteve Wise PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid); 510cfdda9d7SSteve Wise 511cfdda9d7SSteve Wise skb = get_skb(NULL, wrlen, GFP_KERNEL); 512cfdda9d7SSteve Wise if (!skb) { 513cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 514cfdda9d7SSteve Wise __func__); 515cfdda9d7SSteve Wise return -ENOMEM; 516cfdda9d7SSteve Wise } 517d4f1a5c6SSteve Wise set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 518cfdda9d7SSteve Wise 519cfdda9d7SSteve Wise cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 520cfdda9d7SSteve Wise wscale = compute_wscale(rcv_win); 5215be78ee9SVipul Pandya opt0 = (nocong ? 
NO_CONG(1) : 0) | 5225be78ee9SVipul Pandya KEEP_ALIVE(1) | 523ba6d3925SSteve Wise DELACK(1) | 524cfdda9d7SSteve Wise WND_SCALE(wscale) | 525cfdda9d7SSteve Wise MSS_IDX(mtu_idx) | 526cfdda9d7SSteve Wise L2T_IDX(ep->l2t->idx) | 527cfdda9d7SSteve Wise TX_CHAN(ep->tx_chan) | 528cfdda9d7SSteve Wise SMAC_SEL(ep->smac_idx) | 529cfdda9d7SSteve Wise DSCP(ep->tos) | 530b48f3b9cSSteve Wise ULP_MODE(ULP_MODE_TCPDDP) | 531cfdda9d7SSteve Wise RCV_BUFSIZ(rcv_win>>10); 532cfdda9d7SSteve Wise opt2 = RX_CHANNEL(0) | 5335be78ee9SVipul Pandya CCTRL_ECN(enable_ecn) | 534cfdda9d7SSteve Wise RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 535cfdda9d7SSteve Wise if (enable_tcp_timestamps) 536cfdda9d7SSteve Wise opt2 |= TSTAMPS_EN(1); 537cfdda9d7SSteve Wise if (enable_tcp_sack) 538cfdda9d7SSteve Wise opt2 |= SACK_EN(1); 539cfdda9d7SSteve Wise if (wscale && enable_tcp_window_scaling) 540cfdda9d7SSteve Wise opt2 |= WND_SCALE_EN(1); 541cfdda9d7SSteve Wise t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); 542cfdda9d7SSteve Wise 543cfdda9d7SSteve Wise req = (struct cpl_act_open_req *) skb_put(skb, wrlen); 544cfdda9d7SSteve Wise INIT_TP_WR(req, 0); 545cfdda9d7SSteve Wise OPCODE_TID(req) = cpu_to_be32( 546cfdda9d7SSteve Wise MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid))); 547cfdda9d7SSteve Wise req->local_port = ep->com.local_addr.sin_port; 548cfdda9d7SSteve Wise req->peer_port = ep->com.remote_addr.sin_port; 549cfdda9d7SSteve Wise req->local_ip = ep->com.local_addr.sin_addr.s_addr; 550cfdda9d7SSteve Wise req->peer_ip = ep->com.remote_addr.sin_addr.s_addr; 551cfdda9d7SSteve Wise req->opt0 = cpu_to_be64(opt0); 5525be78ee9SVipul Pandya req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t)); 553cfdda9d7SSteve Wise req->opt2 = cpu_to_be32(opt2); 554793dad94SVipul Pandya set_bit(ACT_OPEN_REQ, &ep->com.history); 555cfdda9d7SSteve Wise return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 556cfdda9d7SSteve Wise } 557cfdda9d7SSteve Wise 558d2fe99e8SKumar Sanghvi 
/*
 * Send the MPA start request (initiator side) on an established TCP
 * connection.  For mpa_rev_to_use == 2 an mpa_v2_conn_params block
 * (ird/ord plus optional peer2peer RTR bits) is appended ahead of any
 * private data.  On allocation failure the connect reply upcall is
 * delivered with -ENOMEM instead of returning an error.
 */
static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
		u8 mpa_rev_to_use)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	/* Offload TX work request header; payload is sent immediate. */
	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	/* MPA request frame follows the WR header. */
	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0) |
		     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		/* v2 conn params count as private data on the wire. */
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	return;
}

/*
 * Send an MPA start reply carrying the MPA_REJECT flag (responder refuses
 * the connection), optionally with caller-supplied private data.  When the
 * peer negotiated enhanced (v2) mode, the v2 conn params are included so
 * the reject frame stays well-formed for a v2 peer.
 * Returns 0 on success or a negative errno.
 */
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					   (p2p_type ==
					    FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					    MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					    FW_RI_INIT_P2PTYPE_READ_REQ ?
					    MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

/*
 * Send the accepting MPA start reply (responder side) with the negotiated
 * CRC/marker flags and, in v2 enhanced mode, the ird/ord conn params plus
 * the RTR bits matching the negotiated p2p type.  Private data from the
 * caller follows the v2 params.  Returns 0 on success or a negative errno.
 */
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
				  FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

/*
 * CPL_ACT_ESTABLISH handler: the active-open TCP connection is up.
 * Moves the endpoint from its atid to the hardware tid, records the
 * initial sequence numbers, then kicks off MPA negotiation (falling back
 * to MPA v1 if a previous v2 attempt was aborted by the peer).
 * Always returns 0 (CPL handler convention).
 */
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);
	insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	send_flowc(ep, NULL);
	if (ep->retry_with_mpa_v1)
		send_mpa_req(ep, skb, 1);
	else
		send_mpa_req(ep, skb, mpa_rev);

	return 0;
}

/*
 * Deliver IW_CM_EVENT_CLOSE to the consumer and drop the cm_id/qp
 * references.  Safe to call when no cm_id is attached (no-op then).
 */
static void close_complete_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
}

/*
 * Abort the connection: notify the consumer via the close upcall, mark
 * the endpoint ABORTING and send a hardware abort request.
 */
static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	close_complete_upcall(ep);
	state_set(&ep->com, ABORTING);
	set_bit(ABORT_CONN, &ep->com.history);
	return send_abort(ep, skb, gfp);
}

/*
 * Deliver IW_CM_EVENT_DISCONNECT when the peer closes its half of the
 * connection.  Keeps the cm_id reference (unlike close/abort upcalls).
 */
static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
}

/*
 * Deliver IW_CM_EVENT_CLOSE with -ECONNRESET after a peer abort, then
 * drop the cm_id/qp references.
 */
static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
}

/*
 * Deliver IW_CM_EVENT_CONNECT_REPLY (active side) with the given status.
 * On success or -ECONNREFUSED the peer's private data from the
 * accumulated MPA packet is passed up; for an MPA v2 exchange the
 * mpa_v2_conn_params block is stripped from it first.  On any negative
 * status the cm_id/qp references are dropped.
 */
static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	set_bit(CONN_RPL_UPCALL, &ep->com.history);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

/*
 * Deliver IW_CM_EVENT_CONNECT_REQUEST to the listening endpoint's
 * consumer (passive side).  ird/ord and private data are taken from the
 * MPA v2 params when available, otherwise advertised as the device
 * maximums.  Drops the parent (listener) reference when done; the child
 * ep reference is taken only if the upcall is actually delivered.
 */
static void connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = c4iw_max_read_depth;
		event.ird = c4iw_max_read_depth;
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (state_read(&ep->parent_ep->com) != DEAD) {
		c4iw_get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	}
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

/*
 * Deliver IW_CM_EVENT_ESTABLISHED with the negotiated ird/ord once the
 * MPA exchange has completed.  No-op if the cm_id is already gone.
 */
static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ird;
	event.ord = ep->ord;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
}

/*
 * Return RX credits to the hardware via CPL_RX_DATA_ACK so the peer's
 * TCP window reopens.  Returns the number of credits granted, or 0 if
 * the skb allocation failed (credits will be returned later).
 */
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	/* Force an ack and apply the configured delayed-ack mode. */
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
				       F_RX_DACK_CHANGE |
				       V_RX_DACK_MODE(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}

/*
 * Process an incoming MPA start reply (active side, MPA_REQ_SENT state).
 * The reply may arrive across several RX segments, so the payload is
 * accumulated in ep->mpa_pkt and this function returns early until the
 * full header plus private data is present.  It then validates the
 * header, records the negotiated attributes (including MPA v2 ird/ord
 * and RTR bits), moves the QP to RTS, and delivers the connect reply
 * upcall.  On an RTR mismatch or insufficient IRD the QP is moved to
 * TERMINATE and the upcall reports -ENOMEM; on protocol errors the
 * connection is aborted.
 */
static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	/*
	 * NOTE(review): bitwise '|' binds tighter than '?:', so this enables
	 * CRC if either the peer requested it or the module param is set —
	 * presumably intentional (matches the cxgb3 driver); confirm upstream.
	 */
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
				insuff_ird = 1;
			}

			if (ntohs(mpa_v2_params->ird) &
					MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
						MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
						MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
	     "%d\n", __func__, ep->mpa_attr.crc_enabled,
	     ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
				__func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		goto out;
	}
	goto out;
err:
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}

/*
 * Process an incoming MPA start request (passive side, MPA_REQ_WAIT
 * state).  Continues below this view.
 */
static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;
1258cfdda9d7SSteve Wise /* 1259cfdda9d7SSteve Wise * If we get more than the supported amount of private data 1260cfdda9d7SSteve Wise * then we must fail this connection. 1261cfdda9d7SSteve Wise */ 1262cfdda9d7SSteve Wise if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { 1263cfdda9d7SSteve Wise stop_ep_timer(ep); 1264cfdda9d7SSteve Wise abort_connection(ep, skb, GFP_KERNEL); 1265cfdda9d7SSteve Wise return; 1266cfdda9d7SSteve Wise } 1267cfdda9d7SSteve Wise 1268cfdda9d7SSteve Wise PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); 1269cfdda9d7SSteve Wise 1270cfdda9d7SSteve Wise /* 1271cfdda9d7SSteve Wise * Copy the new data into our accumulation buffer. 1272cfdda9d7SSteve Wise */ 1273cfdda9d7SSteve Wise skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 1274cfdda9d7SSteve Wise skb->len); 1275cfdda9d7SSteve Wise ep->mpa_pkt_len += skb->len; 1276cfdda9d7SSteve Wise 1277cfdda9d7SSteve Wise /* 1278cfdda9d7SSteve Wise * If we don't even have the mpa message, then bail. 1279cfdda9d7SSteve Wise * We'll continue process when more data arrives. 1280cfdda9d7SSteve Wise */ 1281cfdda9d7SSteve Wise if (ep->mpa_pkt_len < sizeof(*mpa)) 1282cfdda9d7SSteve Wise return; 1283cfdda9d7SSteve Wise 1284cfdda9d7SSteve Wise PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); 1285cfdda9d7SSteve Wise stop_ep_timer(ep); 1286cfdda9d7SSteve Wise mpa = (struct mpa_message *) ep->mpa_pkt; 1287cfdda9d7SSteve Wise 1288cfdda9d7SSteve Wise /* 1289cfdda9d7SSteve Wise * Validate MPA Header. 1290cfdda9d7SSteve Wise */ 1291d2fe99e8SKumar Sanghvi if (mpa->revision > mpa_rev) { 1292d2fe99e8SKumar Sanghvi printk(KERN_ERR MOD "%s MPA version mismatch. 
Local = %d," 1293d2fe99e8SKumar Sanghvi " Received = %d\n", __func__, mpa_rev, mpa->revision); 1294cfdda9d7SSteve Wise abort_connection(ep, skb, GFP_KERNEL); 1295cfdda9d7SSteve Wise return; 1296cfdda9d7SSteve Wise } 1297cfdda9d7SSteve Wise 1298cfdda9d7SSteve Wise if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) { 1299cfdda9d7SSteve Wise abort_connection(ep, skb, GFP_KERNEL); 1300cfdda9d7SSteve Wise return; 1301cfdda9d7SSteve Wise } 1302cfdda9d7SSteve Wise 1303cfdda9d7SSteve Wise plen = ntohs(mpa->private_data_size); 1304cfdda9d7SSteve Wise 1305cfdda9d7SSteve Wise /* 1306cfdda9d7SSteve Wise * Fail if there's too much private data. 1307cfdda9d7SSteve Wise */ 1308cfdda9d7SSteve Wise if (plen > MPA_MAX_PRIVATE_DATA) { 1309cfdda9d7SSteve Wise abort_connection(ep, skb, GFP_KERNEL); 1310cfdda9d7SSteve Wise return; 1311cfdda9d7SSteve Wise } 1312cfdda9d7SSteve Wise 1313cfdda9d7SSteve Wise /* 1314cfdda9d7SSteve Wise * If plen does not account for pkt size 1315cfdda9d7SSteve Wise */ 1316cfdda9d7SSteve Wise if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 1317cfdda9d7SSteve Wise abort_connection(ep, skb, GFP_KERNEL); 1318cfdda9d7SSteve Wise return; 1319cfdda9d7SSteve Wise } 1320cfdda9d7SSteve Wise ep->plen = (u8) plen; 1321cfdda9d7SSteve Wise 1322cfdda9d7SSteve Wise /* 1323cfdda9d7SSteve Wise * If we don't have all the pdata yet, then bail. 1324cfdda9d7SSteve Wise */ 1325cfdda9d7SSteve Wise if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 1326cfdda9d7SSteve Wise return; 1327cfdda9d7SSteve Wise 1328cfdda9d7SSteve Wise /* 1329cfdda9d7SSteve Wise * If we get here we have accumulated the entire mpa 1330cfdda9d7SSteve Wise * start reply message including private data. 1331cfdda9d7SSteve Wise */ 1332cfdda9d7SSteve Wise ep->mpa_attr.initiator = 0; 1333cfdda9d7SSteve Wise ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 
1 : 0; 1334cfdda9d7SSteve Wise ep->mpa_attr.recv_marker_enabled = markers_enabled; 1335cfdda9d7SSteve Wise ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1336d2fe99e8SKumar Sanghvi ep->mpa_attr.version = mpa->revision; 1337d2fe99e8SKumar Sanghvi if (mpa->revision == 1) 1338d2fe99e8SKumar Sanghvi ep->tried_with_mpa_v1 = 1; 1339d2fe99e8SKumar Sanghvi ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1340d2fe99e8SKumar Sanghvi 1341d2fe99e8SKumar Sanghvi if (mpa->revision == 2) { 1342d2fe99e8SKumar Sanghvi ep->mpa_attr.enhanced_rdma_conn = 1343d2fe99e8SKumar Sanghvi mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; 1344d2fe99e8SKumar Sanghvi if (ep->mpa_attr.enhanced_rdma_conn) { 1345d2fe99e8SKumar Sanghvi mpa_v2_params = (struct mpa_v2_conn_params *) 1346d2fe99e8SKumar Sanghvi (ep->mpa_pkt + sizeof(*mpa)); 1347d2fe99e8SKumar Sanghvi ep->ird = ntohs(mpa_v2_params->ird) & 1348d2fe99e8SKumar Sanghvi MPA_V2_IRD_ORD_MASK; 1349d2fe99e8SKumar Sanghvi ep->ord = ntohs(mpa_v2_params->ord) & 1350d2fe99e8SKumar Sanghvi MPA_V2_IRD_ORD_MASK; 1351d2fe99e8SKumar Sanghvi if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) 1352d2fe99e8SKumar Sanghvi if (peer2peer) { 1353d2fe99e8SKumar Sanghvi if (ntohs(mpa_v2_params->ord) & 1354d2fe99e8SKumar Sanghvi MPA_V2_RDMA_WRITE_RTR) 1355d2fe99e8SKumar Sanghvi ep->mpa_attr.p2p_type = 1356d2fe99e8SKumar Sanghvi FW_RI_INIT_P2PTYPE_RDMA_WRITE; 1357d2fe99e8SKumar Sanghvi else if (ntohs(mpa_v2_params->ord) & 1358d2fe99e8SKumar Sanghvi MPA_V2_RDMA_READ_RTR) 1359d2fe99e8SKumar Sanghvi ep->mpa_attr.p2p_type = 1360d2fe99e8SKumar Sanghvi FW_RI_INIT_P2PTYPE_READ_REQ; 1361d2fe99e8SKumar Sanghvi } 1362d2fe99e8SKumar Sanghvi } 1363d2fe99e8SKumar Sanghvi } else if (mpa->revision == 1) 1364d2fe99e8SKumar Sanghvi if (peer2peer) 1365d2fe99e8SKumar Sanghvi ep->mpa_attr.p2p_type = p2p_type; 1366d2fe99e8SKumar Sanghvi 1367cfdda9d7SSteve Wise PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " 1368cfdda9d7SSteve Wise "xmit_marker_enabled=%d, 
version=%d p2p_type=%d\n", __func__, 1369cfdda9d7SSteve Wise ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 1370cfdda9d7SSteve Wise ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 1371cfdda9d7SSteve Wise ep->mpa_attr.p2p_type); 1372cfdda9d7SSteve Wise 1373cfdda9d7SSteve Wise state_set(&ep->com, MPA_REQ_RCVD); 1374cfdda9d7SSteve Wise 1375cfdda9d7SSteve Wise /* drive upcall */ 1376cfdda9d7SSteve Wise connect_request_upcall(ep); 1377cfdda9d7SSteve Wise return; 1378cfdda9d7SSteve Wise } 1379cfdda9d7SSteve Wise 1380cfdda9d7SSteve Wise static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) 1381cfdda9d7SSteve Wise { 1382cfdda9d7SSteve Wise struct c4iw_ep *ep; 1383cfdda9d7SSteve Wise struct cpl_rx_data *hdr = cplhdr(skb); 1384cfdda9d7SSteve Wise unsigned int dlen = ntohs(hdr->len); 1385cfdda9d7SSteve Wise unsigned int tid = GET_TID(hdr); 1386cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 1387793dad94SVipul Pandya __u8 status = hdr->status; 1388cfdda9d7SSteve Wise 1389cfdda9d7SSteve Wise ep = lookup_tid(t, tid); 1390cfdda9d7SSteve Wise PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen); 1391cfdda9d7SSteve Wise skb_pull(skb, sizeof(*hdr)); 1392cfdda9d7SSteve Wise skb_trim(skb, dlen); 1393cfdda9d7SSteve Wise 1394cfdda9d7SSteve Wise /* update RX credits */ 1395cfdda9d7SSteve Wise update_rx_credits(ep, dlen); 1396cfdda9d7SSteve Wise 1397cfdda9d7SSteve Wise switch (state_read(&ep->com)) { 1398cfdda9d7SSteve Wise case MPA_REQ_SENT: 139955abf8dfSVipul Pandya ep->rcv_seq += dlen; 1400cfdda9d7SSteve Wise process_mpa_reply(ep, skb); 1401cfdda9d7SSteve Wise break; 1402cfdda9d7SSteve Wise case MPA_REQ_WAIT: 140355abf8dfSVipul Pandya ep->rcv_seq += dlen; 1404cfdda9d7SSteve Wise process_mpa_request(ep, skb); 1405cfdda9d7SSteve Wise break; 1406cfdda9d7SSteve Wise default: 1407793dad94SVipul Pandya pr_err("%s Unexpected streaming data." 
\ 1408793dad94SVipul Pandya " ep %p state %d tid %u status %d\n", 1409793dad94SVipul Pandya __func__, ep, state_read(&ep->com), ep->hwtid, status); 1410cfdda9d7SSteve Wise 141155abf8dfSVipul Pandya if (ep->com.qp) { 141255abf8dfSVipul Pandya struct c4iw_qp_attributes attrs; 141355abf8dfSVipul Pandya 141455abf8dfSVipul Pandya attrs.next_state = C4IW_QP_STATE_ERROR; 141555abf8dfSVipul Pandya c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 141655abf8dfSVipul Pandya C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 141755abf8dfSVipul Pandya } 141855abf8dfSVipul Pandya c4iw_ep_disconnect(ep, 1, GFP_KERNEL); 1419cfdda9d7SSteve Wise break; 1420cfdda9d7SSteve Wise } 1421cfdda9d7SSteve Wise return 0; 1422cfdda9d7SSteve Wise } 1423cfdda9d7SSteve Wise 1424cfdda9d7SSteve Wise static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1425cfdda9d7SSteve Wise { 1426cfdda9d7SSteve Wise struct c4iw_ep *ep; 1427cfdda9d7SSteve Wise struct cpl_abort_rpl_rss *rpl = cplhdr(skb); 1428cfdda9d7SSteve Wise int release = 0; 1429cfdda9d7SSteve Wise unsigned int tid = GET_TID(rpl); 1430cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 1431cfdda9d7SSteve Wise 1432cfdda9d7SSteve Wise ep = lookup_tid(t, tid); 14334984037bSVipul Pandya if (!ep) { 14344984037bSVipul Pandya printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n"); 14354984037bSVipul Pandya return 0; 14364984037bSVipul Pandya } 143792dd6c3dSWei Yongjun PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 14382f5b48c3SSteve Wise mutex_lock(&ep->com.mutex); 1439cfdda9d7SSteve Wise switch (ep->com.state) { 1440cfdda9d7SSteve Wise case ABORTING: 144191e9c071SVipul Pandya c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 1442cfdda9d7SSteve Wise __state_set(&ep->com, DEAD); 1443cfdda9d7SSteve Wise release = 1; 1444cfdda9d7SSteve Wise break; 1445cfdda9d7SSteve Wise default: 1446cfdda9d7SSteve Wise printk(KERN_ERR "%s ep %p state %d\n", 1447cfdda9d7SSteve Wise __func__, ep, ep->com.state); 1448cfdda9d7SSteve Wise break; 1449cfdda9d7SSteve Wise } 
14502f5b48c3SSteve Wise mutex_unlock(&ep->com.mutex); 1451cfdda9d7SSteve Wise 1452cfdda9d7SSteve Wise if (release) 1453cfdda9d7SSteve Wise release_ep_resources(ep); 1454cfdda9d7SSteve Wise return 0; 1455cfdda9d7SSteve Wise } 1456cfdda9d7SSteve Wise 14575be78ee9SVipul Pandya static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) 14585be78ee9SVipul Pandya { 14595be78ee9SVipul Pandya struct sk_buff *skb; 14605be78ee9SVipul Pandya struct fw_ofld_connection_wr *req; 14615be78ee9SVipul Pandya unsigned int mtu_idx; 14625be78ee9SVipul Pandya int wscale; 14635be78ee9SVipul Pandya 14645be78ee9SVipul Pandya skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 14655be78ee9SVipul Pandya req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req)); 14665be78ee9SVipul Pandya memset(req, 0, sizeof(*req)); 14675be78ee9SVipul Pandya req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR)); 14685be78ee9SVipul Pandya req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); 14695be78ee9SVipul Pandya req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, 14705be78ee9SVipul Pandya ep->l2t)); 14715be78ee9SVipul Pandya req->le.lport = ep->com.local_addr.sin_port; 14725be78ee9SVipul Pandya req->le.pport = ep->com.remote_addr.sin_port; 14735be78ee9SVipul Pandya req->le.u.ipv4.lip = ep->com.local_addr.sin_addr.s_addr; 14745be78ee9SVipul Pandya req->le.u.ipv4.pip = ep->com.remote_addr.sin_addr.s_addr; 14755be78ee9SVipul Pandya req->tcb.t_state_to_astid = 14765be78ee9SVipul Pandya htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) | 14775be78ee9SVipul Pandya V_FW_OFLD_CONNECTION_WR_ASTID(atid)); 14785be78ee9SVipul Pandya req->tcb.cplrxdataack_cplpassacceptrpl = 14795be78ee9SVipul Pandya htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK); 14805be78ee9SVipul Pandya req->tcb.tx_max = jiffies; 1481793dad94SVipul Pandya req->tcb.rcv_adv = htons(1); 14825be78ee9SVipul Pandya cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 14835be78ee9SVipul Pandya wscale 
= compute_wscale(rcv_win); 14845be78ee9SVipul Pandya req->tcb.opt0 = TCAM_BYPASS(1) | 14855be78ee9SVipul Pandya (nocong ? NO_CONG(1) : 0) | 14865be78ee9SVipul Pandya KEEP_ALIVE(1) | 14875be78ee9SVipul Pandya DELACK(1) | 14885be78ee9SVipul Pandya WND_SCALE(wscale) | 14895be78ee9SVipul Pandya MSS_IDX(mtu_idx) | 14905be78ee9SVipul Pandya L2T_IDX(ep->l2t->idx) | 14915be78ee9SVipul Pandya TX_CHAN(ep->tx_chan) | 14925be78ee9SVipul Pandya SMAC_SEL(ep->smac_idx) | 14935be78ee9SVipul Pandya DSCP(ep->tos) | 14945be78ee9SVipul Pandya ULP_MODE(ULP_MODE_TCPDDP) | 14955be78ee9SVipul Pandya RCV_BUFSIZ(rcv_win >> 10); 14965be78ee9SVipul Pandya req->tcb.opt2 = PACE(1) | 14975be78ee9SVipul Pandya TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) | 14985be78ee9SVipul Pandya RX_CHANNEL(0) | 14995be78ee9SVipul Pandya CCTRL_ECN(enable_ecn) | 15005be78ee9SVipul Pandya RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 15015be78ee9SVipul Pandya if (enable_tcp_timestamps) 15025be78ee9SVipul Pandya req->tcb.opt2 |= TSTAMPS_EN(1); 15035be78ee9SVipul Pandya if (enable_tcp_sack) 15045be78ee9SVipul Pandya req->tcb.opt2 |= SACK_EN(1); 15055be78ee9SVipul Pandya if (wscale && enable_tcp_window_scaling) 15065be78ee9SVipul Pandya req->tcb.opt2 |= WND_SCALE_EN(1); 15075be78ee9SVipul Pandya req->tcb.opt0 = cpu_to_be64(req->tcb.opt0); 15085be78ee9SVipul Pandya req->tcb.opt2 = cpu_to_be32(req->tcb.opt2); 1509793dad94SVipul Pandya set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx); 1510793dad94SVipul Pandya set_bit(ACT_OFLD_CONN, &ep->com.history); 15115be78ee9SVipul Pandya c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 15125be78ee9SVipul Pandya } 15135be78ee9SVipul Pandya 1514cfdda9d7SSteve Wise /* 1515cfdda9d7SSteve Wise * Return whether a failed active open has allocated a TID 1516cfdda9d7SSteve Wise */ 1517cfdda9d7SSteve Wise static inline int act_open_has_tid(int status) 1518cfdda9d7SSteve Wise { 1519cfdda9d7SSteve Wise return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && 
1520cfdda9d7SSteve Wise status != CPL_ERR_ARP_MISS; 1521cfdda9d7SSteve Wise } 1522cfdda9d7SSteve Wise 1523793dad94SVipul Pandya #define ACT_OPEN_RETRY_COUNT 2 1524793dad94SVipul Pandya 1525793dad94SVipul Pandya static int c4iw_reconnect(struct c4iw_ep *ep) 1526793dad94SVipul Pandya { 1527793dad94SVipul Pandya int err = 0; 1528793dad94SVipul Pandya struct rtable *rt; 1529793dad94SVipul Pandya struct port_info *pi; 1530793dad94SVipul Pandya struct net_device *pdev; 1531793dad94SVipul Pandya int step; 1532793dad94SVipul Pandya struct neighbour *neigh; 1533793dad94SVipul Pandya 1534793dad94SVipul Pandya PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); 1535793dad94SVipul Pandya init_timer(&ep->timer); 1536793dad94SVipul Pandya 1537793dad94SVipul Pandya /* 1538793dad94SVipul Pandya * Allocate an active TID to initiate a TCP connection. 1539793dad94SVipul Pandya */ 1540793dad94SVipul Pandya ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); 1541793dad94SVipul Pandya if (ep->atid == -1) { 1542793dad94SVipul Pandya pr_err("%s - cannot alloc atid.\n", __func__); 1543793dad94SVipul Pandya err = -ENOMEM; 1544793dad94SVipul Pandya goto fail2; 1545793dad94SVipul Pandya } 1546793dad94SVipul Pandya insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid); 1547793dad94SVipul Pandya 1548793dad94SVipul Pandya /* find a route */ 1549793dad94SVipul Pandya rt = find_route(ep->com.dev, 1550793dad94SVipul Pandya ep->com.cm_id->local_addr.sin_addr.s_addr, 1551793dad94SVipul Pandya ep->com.cm_id->remote_addr.sin_addr.s_addr, 1552793dad94SVipul Pandya ep->com.cm_id->local_addr.sin_port, 1553793dad94SVipul Pandya ep->com.cm_id->remote_addr.sin_port, 0); 1554793dad94SVipul Pandya if (!rt) { 1555793dad94SVipul Pandya pr_err("%s - cannot find route.\n", __func__); 1556793dad94SVipul Pandya err = -EHOSTUNREACH; 1557793dad94SVipul Pandya goto fail3; 1558793dad94SVipul Pandya } 1559793dad94SVipul Pandya ep->dst = &rt->dst; 1560793dad94SVipul Pandya 
1561793dad94SVipul Pandya neigh = dst_neigh_lookup(ep->dst, 1562793dad94SVipul Pandya &ep->com.cm_id->remote_addr.sin_addr.s_addr); 1563793dad94SVipul Pandya /* get a l2t entry */ 1564793dad94SVipul Pandya if (neigh->dev->flags & IFF_LOOPBACK) { 1565793dad94SVipul Pandya PDBG("%s LOOPBACK\n", __func__); 1566793dad94SVipul Pandya pdev = ip_dev_find(&init_net, 1567793dad94SVipul Pandya ep->com.cm_id->remote_addr.sin_addr.s_addr); 1568793dad94SVipul Pandya ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, 1569793dad94SVipul Pandya neigh, pdev, 0); 1570793dad94SVipul Pandya pi = (struct port_info *)netdev_priv(pdev); 1571793dad94SVipul Pandya ep->mtu = pdev->mtu; 1572793dad94SVipul Pandya ep->tx_chan = cxgb4_port_chan(pdev); 1573793dad94SVipul Pandya ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; 1574793dad94SVipul Pandya dev_put(pdev); 1575793dad94SVipul Pandya } else { 1576793dad94SVipul Pandya ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, 1577793dad94SVipul Pandya neigh, neigh->dev, 0); 1578793dad94SVipul Pandya pi = (struct port_info *)netdev_priv(neigh->dev); 1579793dad94SVipul Pandya ep->mtu = dst_mtu(ep->dst); 1580793dad94SVipul Pandya ep->tx_chan = cxgb4_port_chan(neigh->dev); 1581793dad94SVipul Pandya ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 1582793dad94SVipul Pandya 0x7F) << 1; 1583793dad94SVipul Pandya } 1584793dad94SVipul Pandya 1585793dad94SVipul Pandya step = ep->com.dev->rdev.lldi.ntxq / ep->com.dev->rdev.lldi.nchan; 1586793dad94SVipul Pandya ep->txq_idx = pi->port_id * step; 1587793dad94SVipul Pandya ep->ctrlq_idx = pi->port_id; 1588793dad94SVipul Pandya step = ep->com.dev->rdev.lldi.nrxq / ep->com.dev->rdev.lldi.nchan; 1589793dad94SVipul Pandya ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[pi->port_id * step]; 1590793dad94SVipul Pandya 1591793dad94SVipul Pandya if (!ep->l2t) { 1592793dad94SVipul Pandya pr_err("%s - cannot alloc l2e.\n", __func__); 1593793dad94SVipul Pandya err = -ENOMEM; 1594793dad94SVipul Pandya goto fail4; 
1595793dad94SVipul Pandya } 1596793dad94SVipul Pandya 1597793dad94SVipul Pandya PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 1598793dad94SVipul Pandya __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 1599793dad94SVipul Pandya ep->l2t->idx); 1600793dad94SVipul Pandya 1601793dad94SVipul Pandya state_set(&ep->com, CONNECTING); 1602793dad94SVipul Pandya ep->tos = 0; 1603793dad94SVipul Pandya 1604793dad94SVipul Pandya /* send connect request to rnic */ 1605793dad94SVipul Pandya err = send_connect(ep); 1606793dad94SVipul Pandya if (!err) 1607793dad94SVipul Pandya goto out; 1608793dad94SVipul Pandya 1609793dad94SVipul Pandya cxgb4_l2t_release(ep->l2t); 1610793dad94SVipul Pandya fail4: 1611793dad94SVipul Pandya dst_release(ep->dst); 1612793dad94SVipul Pandya fail3: 1613793dad94SVipul Pandya remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 1614793dad94SVipul Pandya cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 1615793dad94SVipul Pandya fail2: 1616793dad94SVipul Pandya /* 1617793dad94SVipul Pandya * remember to send notification to upper layer. 1618793dad94SVipul Pandya * We are in here so the upper layer is not aware that this is 1619793dad94SVipul Pandya * re-connect attempt and so, upper layer is still waiting for 1620793dad94SVipul Pandya * response of 1st connect request. 
1621793dad94SVipul Pandya */ 1622793dad94SVipul Pandya connect_reply_upcall(ep, -ECONNRESET); 1623793dad94SVipul Pandya c4iw_put_ep(&ep->com); 1624793dad94SVipul Pandya out: 1625793dad94SVipul Pandya return err; 1626793dad94SVipul Pandya } 1627793dad94SVipul Pandya 1628cfdda9d7SSteve Wise static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1629cfdda9d7SSteve Wise { 1630cfdda9d7SSteve Wise struct c4iw_ep *ep; 1631cfdda9d7SSteve Wise struct cpl_act_open_rpl *rpl = cplhdr(skb); 1632cfdda9d7SSteve Wise unsigned int atid = GET_TID_TID(GET_AOPEN_ATID( 1633cfdda9d7SSteve Wise ntohl(rpl->atid_status))); 1634cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 1635cfdda9d7SSteve Wise int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status)); 1636cfdda9d7SSteve Wise 1637cfdda9d7SSteve Wise ep = lookup_atid(t, atid); 1638cfdda9d7SSteve Wise 1639cfdda9d7SSteve Wise PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, 1640cfdda9d7SSteve Wise status, status2errno(status)); 1641cfdda9d7SSteve Wise 1642cfdda9d7SSteve Wise if (status == CPL_ERR_RTX_NEG_ADVICE) { 1643cfdda9d7SSteve Wise printk(KERN_WARNING MOD "Connection problems for atid %u\n", 1644cfdda9d7SSteve Wise atid); 1645cfdda9d7SSteve Wise return 0; 1646cfdda9d7SSteve Wise } 1647cfdda9d7SSteve Wise 1648793dad94SVipul Pandya set_bit(ACT_OPEN_RPL, &ep->com.history); 1649793dad94SVipul Pandya 1650d716a2a0SVipul Pandya /* 1651d716a2a0SVipul Pandya * Log interesting failures. 
1652d716a2a0SVipul Pandya */ 1653d716a2a0SVipul Pandya switch (status) { 1654d716a2a0SVipul Pandya case CPL_ERR_CONN_RESET: 1655d716a2a0SVipul Pandya case CPL_ERR_CONN_TIMEDOUT: 1656d716a2a0SVipul Pandya break; 16575be78ee9SVipul Pandya case CPL_ERR_TCAM_FULL: 1658793dad94SVipul Pandya if (dev->rdev.lldi.enable_fw_ofld_conn) { 16595be78ee9SVipul Pandya mutex_lock(&dev->rdev.stats.lock); 16605be78ee9SVipul Pandya dev->rdev.stats.tcam_full++; 16615be78ee9SVipul Pandya mutex_unlock(&dev->rdev.stats.lock); 16625be78ee9SVipul Pandya send_fw_act_open_req(ep, 1663793dad94SVipul Pandya GET_TID_TID(GET_AOPEN_ATID( 1664793dad94SVipul Pandya ntohl(rpl->atid_status)))); 16655be78ee9SVipul Pandya return 0; 1666793dad94SVipul Pandya } 1667793dad94SVipul Pandya break; 1668793dad94SVipul Pandya case CPL_ERR_CONN_EXIST: 1669793dad94SVipul Pandya if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 1670793dad94SVipul Pandya set_bit(ACT_RETRY_INUSE, &ep->com.history); 1671793dad94SVipul Pandya remove_handle(ep->com.dev, &ep->com.dev->atid_idr, 1672793dad94SVipul Pandya atid); 1673793dad94SVipul Pandya cxgb4_free_atid(t, atid); 1674793dad94SVipul Pandya dst_release(ep->dst); 1675793dad94SVipul Pandya cxgb4_l2t_release(ep->l2t); 1676793dad94SVipul Pandya c4iw_reconnect(ep); 1677793dad94SVipul Pandya return 0; 1678793dad94SVipul Pandya } 16795be78ee9SVipul Pandya break; 1680d716a2a0SVipul Pandya default: 1681d716a2a0SVipul Pandya printk(KERN_INFO MOD "Active open failure - " 1682d716a2a0SVipul Pandya "atid %u status %u errno %d %pI4:%u->%pI4:%u\n", 1683d716a2a0SVipul Pandya atid, status, status2errno(status), 1684d716a2a0SVipul Pandya &ep->com.local_addr.sin_addr.s_addr, 1685d716a2a0SVipul Pandya ntohs(ep->com.local_addr.sin_port), 1686d716a2a0SVipul Pandya &ep->com.remote_addr.sin_addr.s_addr, 1687d716a2a0SVipul Pandya ntohs(ep->com.remote_addr.sin_port)); 1688d716a2a0SVipul Pandya break; 1689d716a2a0SVipul Pandya } 1690d716a2a0SVipul Pandya 1691cfdda9d7SSteve Wise connect_reply_upcall(ep, 
status2errno(status)); 1692cfdda9d7SSteve Wise state_set(&ep->com, DEAD); 1693cfdda9d7SSteve Wise 1694cfdda9d7SSteve Wise if (status && act_open_has_tid(status)) 1695cfdda9d7SSteve Wise cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); 1696cfdda9d7SSteve Wise 1697793dad94SVipul Pandya remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); 1698cfdda9d7SSteve Wise cxgb4_free_atid(t, atid); 1699cfdda9d7SSteve Wise dst_release(ep->dst); 1700cfdda9d7SSteve Wise cxgb4_l2t_release(ep->l2t); 1701cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 1702cfdda9d7SSteve Wise 1703cfdda9d7SSteve Wise return 0; 1704cfdda9d7SSteve Wise } 1705cfdda9d7SSteve Wise 1706cfdda9d7SSteve Wise static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1707cfdda9d7SSteve Wise { 1708cfdda9d7SSteve Wise struct cpl_pass_open_rpl *rpl = cplhdr(skb); 1709cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 1710cfdda9d7SSteve Wise unsigned int stid = GET_TID(rpl); 1711cfdda9d7SSteve Wise struct c4iw_listen_ep *ep = lookup_stid(t, stid); 1712cfdda9d7SSteve Wise 1713cfdda9d7SSteve Wise if (!ep) { 17141cab775cSVipul Pandya PDBG("%s stid %d lookup failure!\n", __func__, stid); 17151cab775cSVipul Pandya goto out; 1716cfdda9d7SSteve Wise } 1717cfdda9d7SSteve Wise PDBG("%s ep %p status %d error %d\n", __func__, ep, 1718cfdda9d7SSteve Wise rpl->status, status2errno(rpl->status)); 1719d9594d99SSteve Wise c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); 1720cfdda9d7SSteve Wise 17211cab775cSVipul Pandya out: 1722cfdda9d7SSteve Wise return 0; 1723cfdda9d7SSteve Wise } 1724cfdda9d7SSteve Wise 1725cfdda9d7SSteve Wise static int listen_stop(struct c4iw_listen_ep *ep) 1726cfdda9d7SSteve Wise { 1727cfdda9d7SSteve Wise struct sk_buff *skb; 1728cfdda9d7SSteve Wise struct cpl_close_listsvr_req *req; 1729cfdda9d7SSteve Wise 1730cfdda9d7SSteve Wise PDBG("%s ep %p\n", __func__, ep); 1731cfdda9d7SSteve Wise skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 1732cfdda9d7SSteve Wise if 
(!skb) { 1733cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); 1734cfdda9d7SSteve Wise return -ENOMEM; 1735cfdda9d7SSteve Wise } 1736cfdda9d7SSteve Wise req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req)); 1737cfdda9d7SSteve Wise INIT_TP_WR(req, 0); 1738cfdda9d7SSteve Wise OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, 1739cfdda9d7SSteve Wise ep->stid)); 1740cfdda9d7SSteve Wise req->reply_ctrl = cpu_to_be16( 1741cfdda9d7SSteve Wise QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0])); 1742cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); 1743cfdda9d7SSteve Wise return c4iw_ofld_send(&ep->com.dev->rdev, skb); 1744cfdda9d7SSteve Wise } 1745cfdda9d7SSteve Wise 1746cfdda9d7SSteve Wise static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1747cfdda9d7SSteve Wise { 1748cfdda9d7SSteve Wise struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); 1749cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 1750cfdda9d7SSteve Wise unsigned int stid = GET_TID(rpl); 1751cfdda9d7SSteve Wise struct c4iw_listen_ep *ep = lookup_stid(t, stid); 1752cfdda9d7SSteve Wise 1753cfdda9d7SSteve Wise PDBG("%s ep %p\n", __func__, ep); 1754d9594d99SSteve Wise c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); 1755cfdda9d7SSteve Wise return 0; 1756cfdda9d7SSteve Wise } 1757cfdda9d7SSteve Wise 1758cfdda9d7SSteve Wise static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb, 1759cfdda9d7SSteve Wise struct cpl_pass_accept_req *req) 1760cfdda9d7SSteve Wise { 1761cfdda9d7SSteve Wise struct cpl_pass_accept_rpl *rpl; 1762cfdda9d7SSteve Wise unsigned int mtu_idx; 1763cfdda9d7SSteve Wise u64 opt0; 1764cfdda9d7SSteve Wise u32 opt2; 1765cfdda9d7SSteve Wise int wscale; 1766cfdda9d7SSteve Wise 1767cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1768cfdda9d7SSteve Wise BUG_ON(skb_cloned(skb)); 1769cfdda9d7SSteve Wise skb_trim(skb, sizeof(*rpl)); 1770cfdda9d7SSteve Wise 
skb_get(skb); 1771cfdda9d7SSteve Wise cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 1772cfdda9d7SSteve Wise wscale = compute_wscale(rcv_win); 17735be78ee9SVipul Pandya opt0 = (nocong ? NO_CONG(1) : 0) | 17745be78ee9SVipul Pandya KEEP_ALIVE(1) | 1775ba6d3925SSteve Wise DELACK(1) | 1776cfdda9d7SSteve Wise WND_SCALE(wscale) | 1777cfdda9d7SSteve Wise MSS_IDX(mtu_idx) | 1778cfdda9d7SSteve Wise L2T_IDX(ep->l2t->idx) | 1779cfdda9d7SSteve Wise TX_CHAN(ep->tx_chan) | 1780cfdda9d7SSteve Wise SMAC_SEL(ep->smac_idx) | 17815be78ee9SVipul Pandya DSCP(ep->tos >> 2) | 1782b48f3b9cSSteve Wise ULP_MODE(ULP_MODE_TCPDDP) | 1783cfdda9d7SSteve Wise RCV_BUFSIZ(rcv_win>>10); 1784cfdda9d7SSteve Wise opt2 = RX_CHANNEL(0) | 1785cfdda9d7SSteve Wise RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 1786cfdda9d7SSteve Wise 1787cfdda9d7SSteve Wise if (enable_tcp_timestamps && req->tcpopt.tstamp) 1788cfdda9d7SSteve Wise opt2 |= TSTAMPS_EN(1); 1789cfdda9d7SSteve Wise if (enable_tcp_sack && req->tcpopt.sack) 1790cfdda9d7SSteve Wise opt2 |= SACK_EN(1); 1791cfdda9d7SSteve Wise if (wscale && enable_tcp_window_scaling) 1792cfdda9d7SSteve Wise opt2 |= WND_SCALE_EN(1); 17935be78ee9SVipul Pandya if (enable_ecn) { 17945be78ee9SVipul Pandya const struct tcphdr *tcph; 17955be78ee9SVipul Pandya u32 hlen = ntohl(req->hdr_len); 17965be78ee9SVipul Pandya 17975be78ee9SVipul Pandya tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) + 17985be78ee9SVipul Pandya G_IP_HDR_LEN(hlen); 17995be78ee9SVipul Pandya if (tcph->ece && tcph->cwr) 18005be78ee9SVipul Pandya opt2 |= CCTRL_ECN(1); 18015be78ee9SVipul Pandya } 1802cfdda9d7SSteve Wise 1803cfdda9d7SSteve Wise rpl = cplhdr(skb); 1804cfdda9d7SSteve Wise INIT_TP_WR(rpl, ep->hwtid); 1805cfdda9d7SSteve Wise OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, 1806cfdda9d7SSteve Wise ep->hwtid)); 1807cfdda9d7SSteve Wise rpl->opt0 = cpu_to_be64(opt0); 1808cfdda9d7SSteve Wise rpl->opt2 = cpu_to_be32(opt2); 1809d4f1a5c6SSteve Wise set_wr_txq(skb, 
CPL_PRIORITY_SETUP, ep->ctrlq_idx); 1810cfdda9d7SSteve Wise c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1811cfdda9d7SSteve Wise 1812cfdda9d7SSteve Wise return; 1813cfdda9d7SSteve Wise } 1814cfdda9d7SSteve Wise 1815cfdda9d7SSteve Wise static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip, 1816cfdda9d7SSteve Wise struct sk_buff *skb) 1817cfdda9d7SSteve Wise { 1818cfdda9d7SSteve Wise PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid, 1819cfdda9d7SSteve Wise peer_ip); 1820cfdda9d7SSteve Wise BUG_ON(skb_cloned(skb)); 1821cfdda9d7SSteve Wise skb_trim(skb, sizeof(struct cpl_tid_release)); 1822cfdda9d7SSteve Wise skb_get(skb); 1823cfdda9d7SSteve Wise release_tid(&dev->rdev, hwtid, skb); 1824cfdda9d7SSteve Wise return; 1825cfdda9d7SSteve Wise } 1826cfdda9d7SSteve Wise 1827cfdda9d7SSteve Wise static void get_4tuple(struct cpl_pass_accept_req *req, 1828cfdda9d7SSteve Wise __be32 *local_ip, __be32 *peer_ip, 1829cfdda9d7SSteve Wise __be16 *local_port, __be16 *peer_port) 1830cfdda9d7SSteve Wise { 1831cfdda9d7SSteve Wise int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len)); 1832cfdda9d7SSteve Wise int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len)); 1833cfdda9d7SSteve Wise struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len); 1834cfdda9d7SSteve Wise struct tcphdr *tcp = (struct tcphdr *) 1835cfdda9d7SSteve Wise ((u8 *)(req + 1) + eth_len + ip_len); 1836cfdda9d7SSteve Wise 1837cfdda9d7SSteve Wise PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__, 1838cfdda9d7SSteve Wise ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source), 1839cfdda9d7SSteve Wise ntohs(tcp->dest)); 1840cfdda9d7SSteve Wise 1841cfdda9d7SSteve Wise *peer_ip = ip->saddr; 1842cfdda9d7SSteve Wise *local_ip = ip->daddr; 1843cfdda9d7SSteve Wise *peer_port = tcp->source; 1844cfdda9d7SSteve Wise *local_port = tcp->dest; 1845cfdda9d7SSteve Wise 1846cfdda9d7SSteve Wise return; 1847cfdda9d7SSteve Wise } 1848cfdda9d7SSteve Wise 18493786cf18SDavid Miller static int 
import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst, 18503786cf18SDavid Miller struct c4iw_dev *cdev, bool clear_mpa_v1) 18513786cf18SDavid Miller { 18523786cf18SDavid Miller struct neighbour *n; 18533786cf18SDavid Miller int err, step; 18543786cf18SDavid Miller 185564b7007eSDavid Miller n = dst_neigh_lookup(dst, &peer_ip); 18563786cf18SDavid Miller if (!n) 185764b7007eSDavid Miller return -ENODEV; 185864b7007eSDavid Miller 185964b7007eSDavid Miller rcu_read_lock(); 18603786cf18SDavid Miller err = -ENOMEM; 18613786cf18SDavid Miller if (n->dev->flags & IFF_LOOPBACK) { 18623786cf18SDavid Miller struct net_device *pdev; 18633786cf18SDavid Miller 18643786cf18SDavid Miller pdev = ip_dev_find(&init_net, peer_ip); 186571b43fd5SThadeu Lima de Souza Cascardo if (!pdev) { 186671b43fd5SThadeu Lima de Souza Cascardo err = -ENODEV; 186771b43fd5SThadeu Lima de Souza Cascardo goto out; 186871b43fd5SThadeu Lima de Souza Cascardo } 18693786cf18SDavid Miller ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, 18703786cf18SDavid Miller n, pdev, 0); 18713786cf18SDavid Miller if (!ep->l2t) 18723786cf18SDavid Miller goto out; 18733786cf18SDavid Miller ep->mtu = pdev->mtu; 18743786cf18SDavid Miller ep->tx_chan = cxgb4_port_chan(pdev); 18753786cf18SDavid Miller ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; 18763786cf18SDavid Miller step = cdev->rdev.lldi.ntxq / 18773786cf18SDavid Miller cdev->rdev.lldi.nchan; 18783786cf18SDavid Miller ep->txq_idx = cxgb4_port_idx(pdev) * step; 18793786cf18SDavid Miller step = cdev->rdev.lldi.nrxq / 18803786cf18SDavid Miller cdev->rdev.lldi.nchan; 18813786cf18SDavid Miller ep->ctrlq_idx = cxgb4_port_idx(pdev); 18823786cf18SDavid Miller ep->rss_qid = cdev->rdev.lldi.rxq_ids[ 18833786cf18SDavid Miller cxgb4_port_idx(pdev) * step]; 18843786cf18SDavid Miller dev_put(pdev); 18853786cf18SDavid Miller } else { 18863786cf18SDavid Miller ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, 18873786cf18SDavid Miller n, n->dev, 0); 18883786cf18SDavid Miller if 
(!ep->l2t) 18893786cf18SDavid Miller goto out; 1890bd61baafSSteve Wise ep->mtu = dst_mtu(dst); 18913786cf18SDavid Miller ep->tx_chan = cxgb4_port_chan(n->dev); 18923786cf18SDavid Miller ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1; 18933786cf18SDavid Miller step = cdev->rdev.lldi.ntxq / 18943786cf18SDavid Miller cdev->rdev.lldi.nchan; 18953786cf18SDavid Miller ep->txq_idx = cxgb4_port_idx(n->dev) * step; 18963786cf18SDavid Miller ep->ctrlq_idx = cxgb4_port_idx(n->dev); 18973786cf18SDavid Miller step = cdev->rdev.lldi.nrxq / 18983786cf18SDavid Miller cdev->rdev.lldi.nchan; 18993786cf18SDavid Miller ep->rss_qid = cdev->rdev.lldi.rxq_ids[ 19003786cf18SDavid Miller cxgb4_port_idx(n->dev) * step]; 19013786cf18SDavid Miller 19023786cf18SDavid Miller if (clear_mpa_v1) { 19033786cf18SDavid Miller ep->retry_with_mpa_v1 = 0; 19043786cf18SDavid Miller ep->tried_with_mpa_v1 = 0; 19053786cf18SDavid Miller } 19063786cf18SDavid Miller } 19073786cf18SDavid Miller err = 0; 19083786cf18SDavid Miller out: 19093786cf18SDavid Miller rcu_read_unlock(); 19103786cf18SDavid Miller 191164b7007eSDavid Miller neigh_release(n); 191264b7007eSDavid Miller 19133786cf18SDavid Miller return err; 19143786cf18SDavid Miller } 19153786cf18SDavid Miller 1916cfdda9d7SSteve Wise static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) 1917cfdda9d7SSteve Wise { 1918793dad94SVipul Pandya struct c4iw_ep *child_ep = NULL, *parent_ep; 1919cfdda9d7SSteve Wise struct cpl_pass_accept_req *req = cplhdr(skb); 1920cfdda9d7SSteve Wise unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid)); 1921cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 1922cfdda9d7SSteve Wise unsigned int hwtid = GET_TID(req); 1923cfdda9d7SSteve Wise struct dst_entry *dst; 1924cfdda9d7SSteve Wise struct rtable *rt; 19251cab775cSVipul Pandya __be32 local_ip, peer_ip = 0; 1926cfdda9d7SSteve Wise __be16 local_port, peer_port; 19273786cf18SDavid Miller int err; 19281cab775cSVipul Pandya u16 peer_mss = 
ntohs(req->tcpopt.mss); 1929cfdda9d7SSteve Wise 1930cfdda9d7SSteve Wise parent_ep = lookup_stid(t, stid); 19311cab775cSVipul Pandya if (!parent_ep) { 19321cab775cSVipul Pandya PDBG("%s connect request on invalid stid %d\n", __func__, stid); 19331cab775cSVipul Pandya goto reject; 19341cab775cSVipul Pandya } 1935cfdda9d7SSteve Wise get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port); 1936cfdda9d7SSteve Wise 19371cab775cSVipul Pandya PDBG("%s parent ep %p hwtid %u laddr 0x%x raddr 0x%x lport %d " \ 19381cab775cSVipul Pandya "rport %d peer_mss %d\n", __func__, parent_ep, hwtid, 19391cab775cSVipul Pandya ntohl(local_ip), ntohl(peer_ip), ntohs(local_port), 19401cab775cSVipul Pandya ntohs(peer_port), peer_mss); 19411cab775cSVipul Pandya 1942cfdda9d7SSteve Wise if (state_read(&parent_ep->com) != LISTEN) { 1943cfdda9d7SSteve Wise printk(KERN_ERR "%s - listening ep not in LISTEN\n", 1944cfdda9d7SSteve Wise __func__); 1945cfdda9d7SSteve Wise goto reject; 1946cfdda9d7SSteve Wise } 1947cfdda9d7SSteve Wise 1948cfdda9d7SSteve Wise /* Find output route */ 1949cfdda9d7SSteve Wise rt = find_route(dev, local_ip, peer_ip, local_port, peer_port, 1950cfdda9d7SSteve Wise GET_POPEN_TOS(ntohl(req->tos_stid))); 1951cfdda9d7SSteve Wise if (!rt) { 1952cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - failed to find dst entry!\n", 1953cfdda9d7SSteve Wise __func__); 1954cfdda9d7SSteve Wise goto reject; 1955cfdda9d7SSteve Wise } 1956d8d1f30bSChangli Gao dst = &rt->dst; 1957cfdda9d7SSteve Wise 1958cfdda9d7SSteve Wise child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); 1959cfdda9d7SSteve Wise if (!child_ep) { 1960cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", 1961cfdda9d7SSteve Wise __func__); 1962cfdda9d7SSteve Wise dst_release(dst); 1963cfdda9d7SSteve Wise goto reject; 1964cfdda9d7SSteve Wise } 19653786cf18SDavid Miller 19663786cf18SDavid Miller err = import_ep(child_ep, peer_ip, dst, dev, false); 19673786cf18SDavid Miller if (err) { 19683786cf18SDavid 
Miller printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", 19693786cf18SDavid Miller __func__); 19703786cf18SDavid Miller dst_release(dst); 19713786cf18SDavid Miller kfree(child_ep); 19723786cf18SDavid Miller goto reject; 19733786cf18SDavid Miller } 19743786cf18SDavid Miller 19751cab775cSVipul Pandya if (peer_mss && child_ep->mtu > (peer_mss + 40)) 19761cab775cSVipul Pandya child_ep->mtu = peer_mss + 40; 19771cab775cSVipul Pandya 1978cfdda9d7SSteve Wise state_set(&child_ep->com, CONNECTING); 1979cfdda9d7SSteve Wise child_ep->com.dev = dev; 1980cfdda9d7SSteve Wise child_ep->com.cm_id = NULL; 1981cfdda9d7SSteve Wise child_ep->com.local_addr.sin_family = PF_INET; 1982cfdda9d7SSteve Wise child_ep->com.local_addr.sin_port = local_port; 1983cfdda9d7SSteve Wise child_ep->com.local_addr.sin_addr.s_addr = local_ip; 1984cfdda9d7SSteve Wise child_ep->com.remote_addr.sin_family = PF_INET; 1985cfdda9d7SSteve Wise child_ep->com.remote_addr.sin_port = peer_port; 1986cfdda9d7SSteve Wise child_ep->com.remote_addr.sin_addr.s_addr = peer_ip; 1987cfdda9d7SSteve Wise c4iw_get_ep(&parent_ep->com); 1988cfdda9d7SSteve Wise child_ep->parent_ep = parent_ep; 1989cfdda9d7SSteve Wise child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid)); 1990cfdda9d7SSteve Wise child_ep->dst = dst; 1991cfdda9d7SSteve Wise child_ep->hwtid = hwtid; 1992cfdda9d7SSteve Wise 1993cfdda9d7SSteve Wise PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__, 19943786cf18SDavid Miller child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid); 1995cfdda9d7SSteve Wise 1996cfdda9d7SSteve Wise init_timer(&child_ep->timer); 1997cfdda9d7SSteve Wise cxgb4_insert_tid(t, child_ep, hwtid); 1998cfdda9d7SSteve Wise accept_cr(child_ep, peer_ip, skb, req); 1999793dad94SVipul Pandya set_bit(PASS_ACCEPT_REQ, &child_ep->com.history); 2000cfdda9d7SSteve Wise goto out; 2001cfdda9d7SSteve Wise reject: 2002cfdda9d7SSteve Wise reject_cr(dev, hwtid, peer_ip, skb); 2003cfdda9d7SSteve Wise out: 2004cfdda9d7SSteve Wise return 0; 
2005cfdda9d7SSteve Wise } 2006cfdda9d7SSteve Wise 2007cfdda9d7SSteve Wise static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb) 2008cfdda9d7SSteve Wise { 2009cfdda9d7SSteve Wise struct c4iw_ep *ep; 2010cfdda9d7SSteve Wise struct cpl_pass_establish *req = cplhdr(skb); 2011cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 2012cfdda9d7SSteve Wise unsigned int tid = GET_TID(req); 2013cfdda9d7SSteve Wise 2014cfdda9d7SSteve Wise ep = lookup_tid(t, tid); 2015cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2016cfdda9d7SSteve Wise ep->snd_seq = be32_to_cpu(req->snd_isn); 2017cfdda9d7SSteve Wise ep->rcv_seq = be32_to_cpu(req->rcv_isn); 2018cfdda9d7SSteve Wise 20191cab775cSVipul Pandya PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid, 20201cab775cSVipul Pandya ntohs(req->tcp_opt)); 20211cab775cSVipul Pandya 2022cfdda9d7SSteve Wise set_emss(ep, ntohs(req->tcp_opt)); 2023793dad94SVipul Pandya insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid); 2024cfdda9d7SSteve Wise 2025cfdda9d7SSteve Wise dst_confirm(ep->dst); 2026cfdda9d7SSteve Wise state_set(&ep->com, MPA_REQ_WAIT); 2027cfdda9d7SSteve Wise start_ep_timer(ep); 2028cfdda9d7SSteve Wise send_flowc(ep, skb); 2029793dad94SVipul Pandya set_bit(PASS_ESTAB, &ep->com.history); 2030cfdda9d7SSteve Wise 2031cfdda9d7SSteve Wise return 0; 2032cfdda9d7SSteve Wise } 2033cfdda9d7SSteve Wise 2034cfdda9d7SSteve Wise static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) 2035cfdda9d7SSteve Wise { 2036cfdda9d7SSteve Wise struct cpl_peer_close *hdr = cplhdr(skb); 2037cfdda9d7SSteve Wise struct c4iw_ep *ep; 2038cfdda9d7SSteve Wise struct c4iw_qp_attributes attrs; 2039cfdda9d7SSteve Wise int disconnect = 1; 2040cfdda9d7SSteve Wise int release = 0; 2041cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 2042cfdda9d7SSteve Wise unsigned int tid = GET_TID(hdr); 20438da7e7a5SSteve Wise int ret; 2044cfdda9d7SSteve Wise 2045cfdda9d7SSteve Wise ep = lookup_tid(t, tid); 
2046cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2047cfdda9d7SSteve Wise dst_confirm(ep->dst); 2048cfdda9d7SSteve Wise 2049793dad94SVipul Pandya set_bit(PEER_CLOSE, &ep->com.history); 20502f5b48c3SSteve Wise mutex_lock(&ep->com.mutex); 2051cfdda9d7SSteve Wise switch (ep->com.state) { 2052cfdda9d7SSteve Wise case MPA_REQ_WAIT: 2053cfdda9d7SSteve Wise __state_set(&ep->com, CLOSING); 2054cfdda9d7SSteve Wise break; 2055cfdda9d7SSteve Wise case MPA_REQ_SENT: 2056cfdda9d7SSteve Wise __state_set(&ep->com, CLOSING); 2057cfdda9d7SSteve Wise connect_reply_upcall(ep, -ECONNRESET); 2058cfdda9d7SSteve Wise break; 2059cfdda9d7SSteve Wise case MPA_REQ_RCVD: 2060cfdda9d7SSteve Wise 2061cfdda9d7SSteve Wise /* 2062cfdda9d7SSteve Wise * We're gonna mark this puppy DEAD, but keep 2063cfdda9d7SSteve Wise * the reference on it until the ULP accepts or 2064cfdda9d7SSteve Wise * rejects the CR. Also wake up anyone waiting 2065cfdda9d7SSteve Wise * in rdma connection migration (see c4iw_accept_cr()). 
2066cfdda9d7SSteve Wise */ 2067cfdda9d7SSteve Wise __state_set(&ep->com, CLOSING); 2068cfdda9d7SSteve Wise PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 2069d9594d99SSteve Wise c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 2070cfdda9d7SSteve Wise break; 2071cfdda9d7SSteve Wise case MPA_REP_SENT: 2072cfdda9d7SSteve Wise __state_set(&ep->com, CLOSING); 2073cfdda9d7SSteve Wise PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 2074d9594d99SSteve Wise c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 2075cfdda9d7SSteve Wise break; 2076cfdda9d7SSteve Wise case FPDU_MODE: 2077ca5a2202SSteve Wise start_ep_timer(ep); 2078cfdda9d7SSteve Wise __state_set(&ep->com, CLOSING); 207930c95c2dSSteve Wise attrs.next_state = C4IW_QP_STATE_CLOSING; 20808da7e7a5SSteve Wise ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 208130c95c2dSSteve Wise C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 20828da7e7a5SSteve Wise if (ret != -ECONNRESET) { 2083cfdda9d7SSteve Wise peer_close_upcall(ep); 208430c95c2dSSteve Wise disconnect = 1; 20858da7e7a5SSteve Wise } 2086cfdda9d7SSteve Wise break; 2087cfdda9d7SSteve Wise case ABORTING: 2088cfdda9d7SSteve Wise disconnect = 0; 2089cfdda9d7SSteve Wise break; 2090cfdda9d7SSteve Wise case CLOSING: 2091cfdda9d7SSteve Wise __state_set(&ep->com, MORIBUND); 2092cfdda9d7SSteve Wise disconnect = 0; 2093cfdda9d7SSteve Wise break; 2094cfdda9d7SSteve Wise case MORIBUND: 2095ca5a2202SSteve Wise stop_ep_timer(ep); 2096cfdda9d7SSteve Wise if (ep->com.cm_id && ep->com.qp) { 2097cfdda9d7SSteve Wise attrs.next_state = C4IW_QP_STATE_IDLE; 2098cfdda9d7SSteve Wise c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2099cfdda9d7SSteve Wise C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2100cfdda9d7SSteve Wise } 2101cfdda9d7SSteve Wise close_complete_upcall(ep); 2102cfdda9d7SSteve Wise __state_set(&ep->com, DEAD); 2103cfdda9d7SSteve Wise release = 1; 2104cfdda9d7SSteve Wise disconnect = 0; 2105cfdda9d7SSteve Wise break; 2106cfdda9d7SSteve Wise case DEAD: 2107cfdda9d7SSteve Wise disconnect = 0; 
2108cfdda9d7SSteve Wise break; 2109cfdda9d7SSteve Wise default: 2110cfdda9d7SSteve Wise BUG_ON(1); 2111cfdda9d7SSteve Wise } 21122f5b48c3SSteve Wise mutex_unlock(&ep->com.mutex); 2113cfdda9d7SSteve Wise if (disconnect) 2114cfdda9d7SSteve Wise c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 2115cfdda9d7SSteve Wise if (release) 2116cfdda9d7SSteve Wise release_ep_resources(ep); 2117cfdda9d7SSteve Wise return 0; 2118cfdda9d7SSteve Wise } 2119cfdda9d7SSteve Wise 2120cfdda9d7SSteve Wise /* 2121cfdda9d7SSteve Wise * Returns whether an ABORT_REQ_RSS message is a negative advice. 2122cfdda9d7SSteve Wise */ 2123cfdda9d7SSteve Wise static int is_neg_adv_abort(unsigned int status) 2124cfdda9d7SSteve Wise { 2125cfdda9d7SSteve Wise return status == CPL_ERR_RTX_NEG_ADVICE || 2126cfdda9d7SSteve Wise status == CPL_ERR_PERSIST_NEG_ADVICE; 2127cfdda9d7SSteve Wise } 2128cfdda9d7SSteve Wise 2129cfdda9d7SSteve Wise static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) 2130cfdda9d7SSteve Wise { 2131cfdda9d7SSteve Wise struct cpl_abort_req_rss *req = cplhdr(skb); 2132cfdda9d7SSteve Wise struct c4iw_ep *ep; 2133cfdda9d7SSteve Wise struct cpl_abort_rpl *rpl; 2134cfdda9d7SSteve Wise struct sk_buff *rpl_skb; 2135cfdda9d7SSteve Wise struct c4iw_qp_attributes attrs; 2136cfdda9d7SSteve Wise int ret; 2137cfdda9d7SSteve Wise int release = 0; 2138cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 2139cfdda9d7SSteve Wise unsigned int tid = GET_TID(req); 2140cfdda9d7SSteve Wise 2141cfdda9d7SSteve Wise ep = lookup_tid(t, tid); 2142cfdda9d7SSteve Wise if (is_neg_adv_abort(req->status)) { 2143cfdda9d7SSteve Wise PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, 2144cfdda9d7SSteve Wise ep->hwtid); 2145cfdda9d7SSteve Wise return 0; 2146cfdda9d7SSteve Wise } 2147cfdda9d7SSteve Wise PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, 2148cfdda9d7SSteve Wise ep->com.state); 2149793dad94SVipul Pandya set_bit(PEER_ABORT, &ep->com.history); 21502f5b48c3SSteve Wise 21512f5b48c3SSteve 
Wise /* 21522f5b48c3SSteve Wise * Wake up any threads in rdma_init() or rdma_fini(). 2153d2fe99e8SKumar Sanghvi * However, this is not needed if com state is just 2154d2fe99e8SKumar Sanghvi * MPA_REQ_SENT 21552f5b48c3SSteve Wise */ 2156d2fe99e8SKumar Sanghvi if (ep->com.state != MPA_REQ_SENT) 2157d9594d99SSteve Wise c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 21582f5b48c3SSteve Wise 21592f5b48c3SSteve Wise mutex_lock(&ep->com.mutex); 2160cfdda9d7SSteve Wise switch (ep->com.state) { 2161cfdda9d7SSteve Wise case CONNECTING: 2162cfdda9d7SSteve Wise break; 2163cfdda9d7SSteve Wise case MPA_REQ_WAIT: 2164ca5a2202SSteve Wise stop_ep_timer(ep); 2165cfdda9d7SSteve Wise break; 2166cfdda9d7SSteve Wise case MPA_REQ_SENT: 2167ca5a2202SSteve Wise stop_ep_timer(ep); 2168d2fe99e8SKumar Sanghvi if (mpa_rev == 2 && ep->tried_with_mpa_v1) 2169cfdda9d7SSteve Wise connect_reply_upcall(ep, -ECONNRESET); 2170d2fe99e8SKumar Sanghvi else { 2171d2fe99e8SKumar Sanghvi /* 2172d2fe99e8SKumar Sanghvi * we just don't send notification upwards because we 2173d2fe99e8SKumar Sanghvi * want to retry with mpa_v1 without upper layers even 2174d2fe99e8SKumar Sanghvi * knowing it. 2175d2fe99e8SKumar Sanghvi * 2176d2fe99e8SKumar Sanghvi * do some housekeeping so as to re-initiate the 2177d2fe99e8SKumar Sanghvi * connection 2178d2fe99e8SKumar Sanghvi */ 2179d2fe99e8SKumar Sanghvi PDBG("%s: mpa_rev=%d. 
Retrying with mpav1\n", __func__, 2180d2fe99e8SKumar Sanghvi mpa_rev); 2181d2fe99e8SKumar Sanghvi ep->retry_with_mpa_v1 = 1; 2182d2fe99e8SKumar Sanghvi } 2183cfdda9d7SSteve Wise break; 2184cfdda9d7SSteve Wise case MPA_REP_SENT: 2185cfdda9d7SSteve Wise break; 2186cfdda9d7SSteve Wise case MPA_REQ_RCVD: 2187cfdda9d7SSteve Wise break; 2188cfdda9d7SSteve Wise case MORIBUND: 2189cfdda9d7SSteve Wise case CLOSING: 2190ca5a2202SSteve Wise stop_ep_timer(ep); 2191cfdda9d7SSteve Wise /*FALLTHROUGH*/ 2192cfdda9d7SSteve Wise case FPDU_MODE: 2193cfdda9d7SSteve Wise if (ep->com.cm_id && ep->com.qp) { 2194cfdda9d7SSteve Wise attrs.next_state = C4IW_QP_STATE_ERROR; 2195cfdda9d7SSteve Wise ret = c4iw_modify_qp(ep->com.qp->rhp, 2196cfdda9d7SSteve Wise ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 2197cfdda9d7SSteve Wise &attrs, 1); 2198cfdda9d7SSteve Wise if (ret) 2199cfdda9d7SSteve Wise printk(KERN_ERR MOD 2200cfdda9d7SSteve Wise "%s - qp <- error failed!\n", 2201cfdda9d7SSteve Wise __func__); 2202cfdda9d7SSteve Wise } 2203cfdda9d7SSteve Wise peer_abort_upcall(ep); 2204cfdda9d7SSteve Wise break; 2205cfdda9d7SSteve Wise case ABORTING: 2206cfdda9d7SSteve Wise break; 2207cfdda9d7SSteve Wise case DEAD: 2208cfdda9d7SSteve Wise PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); 22092f5b48c3SSteve Wise mutex_unlock(&ep->com.mutex); 2210cfdda9d7SSteve Wise return 0; 2211cfdda9d7SSteve Wise default: 2212cfdda9d7SSteve Wise BUG_ON(1); 2213cfdda9d7SSteve Wise break; 2214cfdda9d7SSteve Wise } 2215cfdda9d7SSteve Wise dst_confirm(ep->dst); 2216cfdda9d7SSteve Wise if (ep->com.state != ABORTING) { 2217cfdda9d7SSteve Wise __state_set(&ep->com, DEAD); 2218d2fe99e8SKumar Sanghvi /* we don't release if we want to retry with mpa_v1 */ 2219d2fe99e8SKumar Sanghvi if (!ep->retry_with_mpa_v1) 2220cfdda9d7SSteve Wise release = 1; 2221cfdda9d7SSteve Wise } 22222f5b48c3SSteve Wise mutex_unlock(&ep->com.mutex); 2223cfdda9d7SSteve Wise 2224cfdda9d7SSteve Wise rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); 
2225cfdda9d7SSteve Wise if (!rpl_skb) { 2226cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - cannot allocate skb!\n", 2227cfdda9d7SSteve Wise __func__); 2228cfdda9d7SSteve Wise release = 1; 2229cfdda9d7SSteve Wise goto out; 2230cfdda9d7SSteve Wise } 2231cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 2232cfdda9d7SSteve Wise rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); 2233cfdda9d7SSteve Wise INIT_TP_WR(rpl, ep->hwtid); 2234cfdda9d7SSteve Wise OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); 2235cfdda9d7SSteve Wise rpl->cmd = CPL_ABORT_NO_RST; 2236cfdda9d7SSteve Wise c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); 2237cfdda9d7SSteve Wise out: 2238cfdda9d7SSteve Wise if (release) 2239cfdda9d7SSteve Wise release_ep_resources(ep); 2240d2fe99e8SKumar Sanghvi 2241d2fe99e8SKumar Sanghvi /* retry with mpa-v1 */ 2242d2fe99e8SKumar Sanghvi if (ep && ep->retry_with_mpa_v1) { 2243d2fe99e8SKumar Sanghvi cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); 2244d2fe99e8SKumar Sanghvi dst_release(ep->dst); 2245d2fe99e8SKumar Sanghvi cxgb4_l2t_release(ep->l2t); 2246d2fe99e8SKumar Sanghvi c4iw_reconnect(ep); 2247d2fe99e8SKumar Sanghvi } 2248d2fe99e8SKumar Sanghvi 2249cfdda9d7SSteve Wise return 0; 2250cfdda9d7SSteve Wise } 2251cfdda9d7SSteve Wise 2252cfdda9d7SSteve Wise static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2253cfdda9d7SSteve Wise { 2254cfdda9d7SSteve Wise struct c4iw_ep *ep; 2255cfdda9d7SSteve Wise struct c4iw_qp_attributes attrs; 2256cfdda9d7SSteve Wise struct cpl_close_con_rpl *rpl = cplhdr(skb); 2257cfdda9d7SSteve Wise int release = 0; 2258cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 2259cfdda9d7SSteve Wise unsigned int tid = GET_TID(rpl); 2260cfdda9d7SSteve Wise 2261cfdda9d7SSteve Wise ep = lookup_tid(t, tid); 2262cfdda9d7SSteve Wise 2263cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2264cfdda9d7SSteve Wise BUG_ON(!ep); 2265cfdda9d7SSteve Wise 
2266cfdda9d7SSteve Wise /* The cm_id may be null if we failed to connect */ 22672f5b48c3SSteve Wise mutex_lock(&ep->com.mutex); 2268cfdda9d7SSteve Wise switch (ep->com.state) { 2269cfdda9d7SSteve Wise case CLOSING: 2270cfdda9d7SSteve Wise __state_set(&ep->com, MORIBUND); 2271cfdda9d7SSteve Wise break; 2272cfdda9d7SSteve Wise case MORIBUND: 2273ca5a2202SSteve Wise stop_ep_timer(ep); 2274cfdda9d7SSteve Wise if ((ep->com.cm_id) && (ep->com.qp)) { 2275cfdda9d7SSteve Wise attrs.next_state = C4IW_QP_STATE_IDLE; 2276cfdda9d7SSteve Wise c4iw_modify_qp(ep->com.qp->rhp, 2277cfdda9d7SSteve Wise ep->com.qp, 2278cfdda9d7SSteve Wise C4IW_QP_ATTR_NEXT_STATE, 2279cfdda9d7SSteve Wise &attrs, 1); 2280cfdda9d7SSteve Wise } 2281cfdda9d7SSteve Wise close_complete_upcall(ep); 2282cfdda9d7SSteve Wise __state_set(&ep->com, DEAD); 2283cfdda9d7SSteve Wise release = 1; 2284cfdda9d7SSteve Wise break; 2285cfdda9d7SSteve Wise case ABORTING: 2286cfdda9d7SSteve Wise case DEAD: 2287cfdda9d7SSteve Wise break; 2288cfdda9d7SSteve Wise default: 2289cfdda9d7SSteve Wise BUG_ON(1); 2290cfdda9d7SSteve Wise break; 2291cfdda9d7SSteve Wise } 22922f5b48c3SSteve Wise mutex_unlock(&ep->com.mutex); 2293cfdda9d7SSteve Wise if (release) 2294cfdda9d7SSteve Wise release_ep_resources(ep); 2295cfdda9d7SSteve Wise return 0; 2296cfdda9d7SSteve Wise } 2297cfdda9d7SSteve Wise 2298cfdda9d7SSteve Wise static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) 2299cfdda9d7SSteve Wise { 23000e42c1f4SSteve Wise struct cpl_rdma_terminate *rpl = cplhdr(skb); 2301cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 23020e42c1f4SSteve Wise unsigned int tid = GET_TID(rpl); 23030e42c1f4SSteve Wise struct c4iw_ep *ep; 23040e42c1f4SSteve Wise struct c4iw_qp_attributes attrs; 2305cfdda9d7SSteve Wise 2306cfdda9d7SSteve Wise ep = lookup_tid(t, tid); 23070e42c1f4SSteve Wise BUG_ON(!ep); 2308cfdda9d7SSteve Wise 230930c95c2dSSteve Wise if (ep && ep->com.qp) { 23100e42c1f4SSteve Wise printk(KERN_WARNING MOD "TERM received tid 
%u qpid %u\n", tid, 23110e42c1f4SSteve Wise ep->com.qp->wq.sq.qid); 23120e42c1f4SSteve Wise attrs.next_state = C4IW_QP_STATE_TERMINATE; 23130e42c1f4SSteve Wise c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 23140e42c1f4SSteve Wise C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 23150e42c1f4SSteve Wise } else 231630c95c2dSSteve Wise printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); 2317cfdda9d7SSteve Wise 2318cfdda9d7SSteve Wise return 0; 2319cfdda9d7SSteve Wise } 2320cfdda9d7SSteve Wise 2321cfdda9d7SSteve Wise /* 2322cfdda9d7SSteve Wise * Upcall from the adapter indicating data has been transmitted. 2323cfdda9d7SSteve Wise * For us its just the single MPA request or reply. We can now free 2324cfdda9d7SSteve Wise * the skb holding the mpa message. 2325cfdda9d7SSteve Wise */ 2326cfdda9d7SSteve Wise static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) 2327cfdda9d7SSteve Wise { 2328cfdda9d7SSteve Wise struct c4iw_ep *ep; 2329cfdda9d7SSteve Wise struct cpl_fw4_ack *hdr = cplhdr(skb); 2330cfdda9d7SSteve Wise u8 credits = hdr->credits; 2331cfdda9d7SSteve Wise unsigned int tid = GET_TID(hdr); 2332cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 2333cfdda9d7SSteve Wise 2334cfdda9d7SSteve Wise 2335cfdda9d7SSteve Wise ep = lookup_tid(t, tid); 2336cfdda9d7SSteve Wise PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); 2337cfdda9d7SSteve Wise if (credits == 0) { 2338aa1ad260SJoe Perches PDBG("%s 0 credit ack ep %p tid %u state %u\n", 2339cfdda9d7SSteve Wise __func__, ep, ep->hwtid, state_read(&ep->com)); 2340cfdda9d7SSteve Wise return 0; 2341cfdda9d7SSteve Wise } 2342cfdda9d7SSteve Wise 2343cfdda9d7SSteve Wise dst_confirm(ep->dst); 2344cfdda9d7SSteve Wise if (ep->mpa_skb) { 2345cfdda9d7SSteve Wise PDBG("%s last streaming msg ack ep %p tid %u state %u " 2346cfdda9d7SSteve Wise "initiator %u freeing skb\n", __func__, ep, ep->hwtid, 2347cfdda9d7SSteve Wise state_read(&ep->com), ep->mpa_attr.initiator ? 
1 : 0); 2348cfdda9d7SSteve Wise kfree_skb(ep->mpa_skb); 2349cfdda9d7SSteve Wise ep->mpa_skb = NULL; 2350cfdda9d7SSteve Wise } 2351cfdda9d7SSteve Wise return 0; 2352cfdda9d7SSteve Wise } 2353cfdda9d7SSteve Wise 2354cfdda9d7SSteve Wise int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 2355cfdda9d7SSteve Wise { 2356cfdda9d7SSteve Wise int err; 2357cfdda9d7SSteve Wise struct c4iw_ep *ep = to_ep(cm_id); 2358cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2359cfdda9d7SSteve Wise 2360cfdda9d7SSteve Wise if (state_read(&ep->com) == DEAD) { 2361cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 2362cfdda9d7SSteve Wise return -ECONNRESET; 2363cfdda9d7SSteve Wise } 2364793dad94SVipul Pandya set_bit(ULP_REJECT, &ep->com.history); 2365cfdda9d7SSteve Wise BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 2366cfdda9d7SSteve Wise if (mpa_rev == 0) 2367cfdda9d7SSteve Wise abort_connection(ep, NULL, GFP_KERNEL); 2368cfdda9d7SSteve Wise else { 2369cfdda9d7SSteve Wise err = send_mpa_reject(ep, pdata, pdata_len); 2370cfdda9d7SSteve Wise err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 2371cfdda9d7SSteve Wise } 2372cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 2373cfdda9d7SSteve Wise return 0; 2374cfdda9d7SSteve Wise } 2375cfdda9d7SSteve Wise 2376cfdda9d7SSteve Wise int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 2377cfdda9d7SSteve Wise { 2378cfdda9d7SSteve Wise int err; 2379cfdda9d7SSteve Wise struct c4iw_qp_attributes attrs; 2380cfdda9d7SSteve Wise enum c4iw_qp_attr_mask mask; 2381cfdda9d7SSteve Wise struct c4iw_ep *ep = to_ep(cm_id); 2382cfdda9d7SSteve Wise struct c4iw_dev *h = to_c4iw_dev(cm_id->device); 2383cfdda9d7SSteve Wise struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); 2384cfdda9d7SSteve Wise 2385cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2386cfdda9d7SSteve Wise if (state_read(&ep->com) == DEAD) { 2387cfdda9d7SSteve Wise err = -ECONNRESET; 2388cfdda9d7SSteve Wise goto err; 
2389cfdda9d7SSteve Wise } 2390cfdda9d7SSteve Wise 2391cfdda9d7SSteve Wise BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 2392cfdda9d7SSteve Wise BUG_ON(!qp); 2393cfdda9d7SSteve Wise 2394793dad94SVipul Pandya set_bit(ULP_ACCEPT, &ep->com.history); 2395be4c9badSRoland Dreier if ((conn_param->ord > c4iw_max_read_depth) || 2396be4c9badSRoland Dreier (conn_param->ird > c4iw_max_read_depth)) { 2397cfdda9d7SSteve Wise abort_connection(ep, NULL, GFP_KERNEL); 2398cfdda9d7SSteve Wise err = -EINVAL; 2399cfdda9d7SSteve Wise goto err; 2400cfdda9d7SSteve Wise } 2401cfdda9d7SSteve Wise 2402d2fe99e8SKumar Sanghvi if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 2403d2fe99e8SKumar Sanghvi if (conn_param->ord > ep->ird) { 2404d2fe99e8SKumar Sanghvi ep->ird = conn_param->ird; 2405d2fe99e8SKumar Sanghvi ep->ord = conn_param->ord; 2406d2fe99e8SKumar Sanghvi send_mpa_reject(ep, conn_param->private_data, 2407d2fe99e8SKumar Sanghvi conn_param->private_data_len); 2408d2fe99e8SKumar Sanghvi abort_connection(ep, NULL, GFP_KERNEL); 2409d2fe99e8SKumar Sanghvi err = -ENOMEM; 2410d2fe99e8SKumar Sanghvi goto err; 2411d2fe99e8SKumar Sanghvi } 2412d2fe99e8SKumar Sanghvi if (conn_param->ird > ep->ord) { 2413d2fe99e8SKumar Sanghvi if (!ep->ord) 2414d2fe99e8SKumar Sanghvi conn_param->ird = 1; 2415d2fe99e8SKumar Sanghvi else { 2416d2fe99e8SKumar Sanghvi abort_connection(ep, NULL, GFP_KERNEL); 2417d2fe99e8SKumar Sanghvi err = -ENOMEM; 2418d2fe99e8SKumar Sanghvi goto err; 2419d2fe99e8SKumar Sanghvi } 2420d2fe99e8SKumar Sanghvi } 2421cfdda9d7SSteve Wise 2422d2fe99e8SKumar Sanghvi } 2423cfdda9d7SSteve Wise ep->ird = conn_param->ird; 2424cfdda9d7SSteve Wise ep->ord = conn_param->ord; 2425cfdda9d7SSteve Wise 2426d2fe99e8SKumar Sanghvi if (ep->mpa_attr.version != 2) 2427cfdda9d7SSteve Wise if (peer2peer && ep->ird == 0) 2428cfdda9d7SSteve Wise ep->ird = 1; 2429cfdda9d7SSteve Wise 2430cfdda9d7SSteve Wise PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); 2431cfdda9d7SSteve 
Wise 2432d2fe99e8SKumar Sanghvi cm_id->add_ref(cm_id); 2433d2fe99e8SKumar Sanghvi ep->com.cm_id = cm_id; 2434d2fe99e8SKumar Sanghvi ep->com.qp = qp; 2435d2fe99e8SKumar Sanghvi 2436cfdda9d7SSteve Wise /* bind QP to EP and move to RTS */ 2437cfdda9d7SSteve Wise attrs.mpa_attr = ep->mpa_attr; 2438cfdda9d7SSteve Wise attrs.max_ird = ep->ird; 2439cfdda9d7SSteve Wise attrs.max_ord = ep->ord; 2440cfdda9d7SSteve Wise attrs.llp_stream_handle = ep; 2441cfdda9d7SSteve Wise attrs.next_state = C4IW_QP_STATE_RTS; 2442cfdda9d7SSteve Wise 2443cfdda9d7SSteve Wise /* bind QP and TID with INIT_WR */ 2444cfdda9d7SSteve Wise mask = C4IW_QP_ATTR_NEXT_STATE | 2445cfdda9d7SSteve Wise C4IW_QP_ATTR_LLP_STREAM_HANDLE | 2446cfdda9d7SSteve Wise C4IW_QP_ATTR_MPA_ATTR | 2447cfdda9d7SSteve Wise C4IW_QP_ATTR_MAX_IRD | 2448cfdda9d7SSteve Wise C4IW_QP_ATTR_MAX_ORD; 2449cfdda9d7SSteve Wise 2450cfdda9d7SSteve Wise err = c4iw_modify_qp(ep->com.qp->rhp, 2451cfdda9d7SSteve Wise ep->com.qp, mask, &attrs, 1); 2452cfdda9d7SSteve Wise if (err) 2453cfdda9d7SSteve Wise goto err1; 2454cfdda9d7SSteve Wise err = send_mpa_reply(ep, conn_param->private_data, 2455cfdda9d7SSteve Wise conn_param->private_data_len); 2456cfdda9d7SSteve Wise if (err) 2457cfdda9d7SSteve Wise goto err1; 2458cfdda9d7SSteve Wise 2459cfdda9d7SSteve Wise state_set(&ep->com, FPDU_MODE); 2460cfdda9d7SSteve Wise established_upcall(ep); 2461cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 2462cfdda9d7SSteve Wise return 0; 2463cfdda9d7SSteve Wise err1: 2464cfdda9d7SSteve Wise ep->com.cm_id = NULL; 2465cfdda9d7SSteve Wise ep->com.qp = NULL; 2466cfdda9d7SSteve Wise cm_id->rem_ref(cm_id); 2467cfdda9d7SSteve Wise err: 2468cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 2469cfdda9d7SSteve Wise return err; 2470cfdda9d7SSteve Wise } 2471cfdda9d7SSteve Wise 2472cfdda9d7SSteve Wise int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 2473cfdda9d7SSteve Wise { 2474cfdda9d7SSteve Wise struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 
2475cfdda9d7SSteve Wise struct c4iw_ep *ep; 2476cfdda9d7SSteve Wise struct rtable *rt; 24773786cf18SDavid Miller int err = 0; 2478cfdda9d7SSteve Wise 2479be4c9badSRoland Dreier if ((conn_param->ord > c4iw_max_read_depth) || 2480be4c9badSRoland Dreier (conn_param->ird > c4iw_max_read_depth)) { 2481be4c9badSRoland Dreier err = -EINVAL; 2482be4c9badSRoland Dreier goto out; 2483be4c9badSRoland Dreier } 2484cfdda9d7SSteve Wise ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 2485cfdda9d7SSteve Wise if (!ep) { 2486cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 2487cfdda9d7SSteve Wise err = -ENOMEM; 2488cfdda9d7SSteve Wise goto out; 2489cfdda9d7SSteve Wise } 2490cfdda9d7SSteve Wise init_timer(&ep->timer); 2491cfdda9d7SSteve Wise ep->plen = conn_param->private_data_len; 2492cfdda9d7SSteve Wise if (ep->plen) 2493cfdda9d7SSteve Wise memcpy(ep->mpa_pkt + sizeof(struct mpa_message), 2494cfdda9d7SSteve Wise conn_param->private_data, ep->plen); 2495cfdda9d7SSteve Wise ep->ird = conn_param->ird; 2496cfdda9d7SSteve Wise ep->ord = conn_param->ord; 2497cfdda9d7SSteve Wise 2498cfdda9d7SSteve Wise if (peer2peer && ep->ord == 0) 2499cfdda9d7SSteve Wise ep->ord = 1; 2500cfdda9d7SSteve Wise 2501cfdda9d7SSteve Wise cm_id->add_ref(cm_id); 2502cfdda9d7SSteve Wise ep->com.dev = dev; 2503cfdda9d7SSteve Wise ep->com.cm_id = cm_id; 2504cfdda9d7SSteve Wise ep->com.qp = get_qhp(dev, conn_param->qpn); 2505cfdda9d7SSteve Wise BUG_ON(!ep->com.qp); 2506cfdda9d7SSteve Wise PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, 2507cfdda9d7SSteve Wise ep->com.qp, cm_id); 2508cfdda9d7SSteve Wise 2509cfdda9d7SSteve Wise /* 2510cfdda9d7SSteve Wise * Allocate an active TID to initiate a TCP connection. 
2511cfdda9d7SSteve Wise */ 2512cfdda9d7SSteve Wise ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); 2513cfdda9d7SSteve Wise if (ep->atid == -1) { 2514cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); 2515cfdda9d7SSteve Wise err = -ENOMEM; 2516cfdda9d7SSteve Wise goto fail2; 2517cfdda9d7SSteve Wise } 2518793dad94SVipul Pandya insert_handle(dev, &dev->atid_idr, ep, ep->atid); 2519cfdda9d7SSteve Wise 2520cfdda9d7SSteve Wise PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__, 2521cfdda9d7SSteve Wise ntohl(cm_id->local_addr.sin_addr.s_addr), 2522cfdda9d7SSteve Wise ntohs(cm_id->local_addr.sin_port), 2523cfdda9d7SSteve Wise ntohl(cm_id->remote_addr.sin_addr.s_addr), 2524cfdda9d7SSteve Wise ntohs(cm_id->remote_addr.sin_port)); 2525cfdda9d7SSteve Wise 2526cfdda9d7SSteve Wise /* find a route */ 2527cfdda9d7SSteve Wise rt = find_route(dev, 2528cfdda9d7SSteve Wise cm_id->local_addr.sin_addr.s_addr, 2529cfdda9d7SSteve Wise cm_id->remote_addr.sin_addr.s_addr, 2530cfdda9d7SSteve Wise cm_id->local_addr.sin_port, 2531cfdda9d7SSteve Wise cm_id->remote_addr.sin_port, 0); 2532cfdda9d7SSteve Wise if (!rt) { 2533cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); 2534cfdda9d7SSteve Wise err = -EHOSTUNREACH; 2535cfdda9d7SSteve Wise goto fail3; 2536cfdda9d7SSteve Wise } 2537d8d1f30bSChangli Gao ep->dst = &rt->dst; 2538cfdda9d7SSteve Wise 25393786cf18SDavid Miller err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr, 25403786cf18SDavid Miller ep->dst, ep->com.dev, true); 25413786cf18SDavid Miller if (err) { 2542cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 2543cfdda9d7SSteve Wise goto fail4; 2544cfdda9d7SSteve Wise } 2545cfdda9d7SSteve Wise 2546cfdda9d7SSteve Wise PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 2547cfdda9d7SSteve Wise __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 2548cfdda9d7SSteve Wise ep->l2t->idx); 2549cfdda9d7SSteve 
Wise 2550cfdda9d7SSteve Wise state_set(&ep->com, CONNECTING); 2551cfdda9d7SSteve Wise ep->tos = 0; 2552cfdda9d7SSteve Wise ep->com.local_addr = cm_id->local_addr; 2553cfdda9d7SSteve Wise ep->com.remote_addr = cm_id->remote_addr; 2554cfdda9d7SSteve Wise 2555cfdda9d7SSteve Wise /* send connect request to rnic */ 2556cfdda9d7SSteve Wise err = send_connect(ep); 2557cfdda9d7SSteve Wise if (!err) 2558cfdda9d7SSteve Wise goto out; 2559cfdda9d7SSteve Wise 2560cfdda9d7SSteve Wise cxgb4_l2t_release(ep->l2t); 2561cfdda9d7SSteve Wise fail4: 2562cfdda9d7SSteve Wise dst_release(ep->dst); 2563cfdda9d7SSteve Wise fail3: 2564793dad94SVipul Pandya remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 2565cfdda9d7SSteve Wise cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 2566cfdda9d7SSteve Wise fail2: 2567cfdda9d7SSteve Wise cm_id->rem_ref(cm_id); 2568cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 2569cfdda9d7SSteve Wise out: 2570cfdda9d7SSteve Wise return err; 2571cfdda9d7SSteve Wise } 2572cfdda9d7SSteve Wise 2573cfdda9d7SSteve Wise int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) 2574cfdda9d7SSteve Wise { 2575cfdda9d7SSteve Wise int err = 0; 2576cfdda9d7SSteve Wise struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 2577cfdda9d7SSteve Wise struct c4iw_listen_ep *ep; 2578cfdda9d7SSteve Wise 2579cfdda9d7SSteve Wise might_sleep(); 2580cfdda9d7SSteve Wise 2581cfdda9d7SSteve Wise ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 2582cfdda9d7SSteve Wise if (!ep) { 2583cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 2584cfdda9d7SSteve Wise err = -ENOMEM; 2585cfdda9d7SSteve Wise goto fail1; 2586cfdda9d7SSteve Wise } 2587cfdda9d7SSteve Wise PDBG("%s ep %p\n", __func__, ep); 2588cfdda9d7SSteve Wise cm_id->add_ref(cm_id); 2589cfdda9d7SSteve Wise ep->com.cm_id = cm_id; 2590cfdda9d7SSteve Wise ep->com.dev = dev; 2591cfdda9d7SSteve Wise ep->backlog = backlog; 2592cfdda9d7SSteve Wise ep->com.local_addr = cm_id->local_addr; 2593cfdda9d7SSteve Wise 
2594cfdda9d7SSteve Wise /* 2595cfdda9d7SSteve Wise * Allocate a server TID. 2596cfdda9d7SSteve Wise */ 25971cab775cSVipul Pandya if (dev->rdev.lldi.enable_fw_ofld_conn) 25981cab775cSVipul Pandya ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, PF_INET, ep); 25991cab775cSVipul Pandya else 2600cfdda9d7SSteve Wise ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep); 26011cab775cSVipul Pandya 2602cfdda9d7SSteve Wise if (ep->stid == -1) { 2603be4c9badSRoland Dreier printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__); 2604cfdda9d7SSteve Wise err = -ENOMEM; 2605cfdda9d7SSteve Wise goto fail2; 2606cfdda9d7SSteve Wise } 2607793dad94SVipul Pandya insert_handle(dev, &dev->stid_idr, ep, ep->stid); 2608cfdda9d7SSteve Wise state_set(&ep->com, LISTEN); 26091cab775cSVipul Pandya if (dev->rdev.lldi.enable_fw_ofld_conn) { 26101cab775cSVipul Pandya do { 26111cab775cSVipul Pandya err = cxgb4_create_server_filter( 26121cab775cSVipul Pandya ep->com.dev->rdev.lldi.ports[0], ep->stid, 2613cfdda9d7SSteve Wise ep->com.local_addr.sin_addr.s_addr, 2614cfdda9d7SSteve Wise ep->com.local_addr.sin_port, 2615793dad94SVipul Pandya 0, 2616793dad94SVipul Pandya ep->com.dev->rdev.lldi.rxq_ids[0], 2617793dad94SVipul Pandya 0, 2618793dad94SVipul Pandya 0); 26191cab775cSVipul Pandya if (err == -EBUSY) { 26201cab775cSVipul Pandya set_current_state(TASK_UNINTERRUPTIBLE); 26211cab775cSVipul Pandya schedule_timeout(usecs_to_jiffies(100)); 26221cab775cSVipul Pandya } 26231cab775cSVipul Pandya } while (err == -EBUSY); 26241cab775cSVipul Pandya } else { 26251cab775cSVipul Pandya c4iw_init_wr_wait(&ep->com.wr_wait); 26261cab775cSVipul Pandya err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], 26271cab775cSVipul Pandya ep->stid, ep->com.local_addr.sin_addr.s_addr, 26281cab775cSVipul Pandya ep->com.local_addr.sin_port, 2629793dad94SVipul Pandya 0, 26301cab775cSVipul Pandya ep->com.dev->rdev.lldi.rxq_ids[0]); 26311cab775cSVipul Pandya if (!err) 26321cab775cSVipul Pandya err = 
c4iw_wait_for_reply(&ep->com.dev->rdev, 26331cab775cSVipul Pandya &ep->com.wr_wait, 26341cab775cSVipul Pandya 0, 0, __func__); 26351cab775cSVipul Pandya } 2636cfdda9d7SSteve Wise if (!err) { 2637cfdda9d7SSteve Wise cm_id->provider_data = ep; 2638cfdda9d7SSteve Wise goto out; 2639cfdda9d7SSteve Wise } 26401cab775cSVipul Pandya pr_err("%s cxgb4_create_server/filter failed err %d " \ 26411cab775cSVipul Pandya "stid %d laddr %08x lport %d\n", \ 26421cab775cSVipul Pandya __func__, err, ep->stid, 26431cab775cSVipul Pandya ntohl(ep->com.local_addr.sin_addr.s_addr), 26441cab775cSVipul Pandya ntohs(ep->com.local_addr.sin_port)); 2645cfdda9d7SSteve Wise cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); 2646cfdda9d7SSteve Wise fail2: 2647cfdda9d7SSteve Wise cm_id->rem_ref(cm_id); 2648cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 2649cfdda9d7SSteve Wise fail1: 2650cfdda9d7SSteve Wise out: 2651cfdda9d7SSteve Wise return err; 2652cfdda9d7SSteve Wise } 2653cfdda9d7SSteve Wise 2654cfdda9d7SSteve Wise int c4iw_destroy_listen(struct iw_cm_id *cm_id) 2655cfdda9d7SSteve Wise { 2656cfdda9d7SSteve Wise int err; 2657cfdda9d7SSteve Wise struct c4iw_listen_ep *ep = to_listen_ep(cm_id); 2658cfdda9d7SSteve Wise 2659cfdda9d7SSteve Wise PDBG("%s ep %p\n", __func__, ep); 2660cfdda9d7SSteve Wise 2661cfdda9d7SSteve Wise might_sleep(); 2662cfdda9d7SSteve Wise state_set(&ep->com, DEAD); 26631cab775cSVipul Pandya if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn) { 26641cab775cSVipul Pandya err = cxgb4_remove_server_filter( 26651cab775cSVipul Pandya ep->com.dev->rdev.lldi.ports[0], ep->stid, 26661cab775cSVipul Pandya ep->com.dev->rdev.lldi.rxq_ids[0], 0); 26671cab775cSVipul Pandya } else { 2668aadc4df3SSteve Wise c4iw_init_wr_wait(&ep->com.wr_wait); 2669cfdda9d7SSteve Wise err = listen_stop(ep); 2670cfdda9d7SSteve Wise if (err) 2671cfdda9d7SSteve Wise goto done; 26721cab775cSVipul Pandya err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 26731cab775cSVipul Pandya 0, 0, 
__func__); 26741cab775cSVipul Pandya } 2675793dad94SVipul Pandya remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid); 2676cfdda9d7SSteve Wise cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); 2677cfdda9d7SSteve Wise done: 2678cfdda9d7SSteve Wise cm_id->rem_ref(cm_id); 2679cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 2680cfdda9d7SSteve Wise return err; 2681cfdda9d7SSteve Wise } 2682cfdda9d7SSteve Wise 2683cfdda9d7SSteve Wise int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) 2684cfdda9d7SSteve Wise { 2685cfdda9d7SSteve Wise int ret = 0; 2686cfdda9d7SSteve Wise int close = 0; 2687cfdda9d7SSteve Wise int fatal = 0; 2688cfdda9d7SSteve Wise struct c4iw_rdev *rdev; 2689cfdda9d7SSteve Wise 26902f5b48c3SSteve Wise mutex_lock(&ep->com.mutex); 2691cfdda9d7SSteve Wise 2692cfdda9d7SSteve Wise PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, 2693cfdda9d7SSteve Wise states[ep->com.state], abrupt); 2694cfdda9d7SSteve Wise 2695cfdda9d7SSteve Wise rdev = &ep->com.dev->rdev; 2696cfdda9d7SSteve Wise if (c4iw_fatal_error(rdev)) { 2697cfdda9d7SSteve Wise fatal = 1; 2698cfdda9d7SSteve Wise close_complete_upcall(ep); 2699cfdda9d7SSteve Wise ep->com.state = DEAD; 2700cfdda9d7SSteve Wise } 2701cfdda9d7SSteve Wise switch (ep->com.state) { 2702cfdda9d7SSteve Wise case MPA_REQ_WAIT: 2703cfdda9d7SSteve Wise case MPA_REQ_SENT: 2704cfdda9d7SSteve Wise case MPA_REQ_RCVD: 2705cfdda9d7SSteve Wise case MPA_REP_SENT: 2706cfdda9d7SSteve Wise case FPDU_MODE: 2707cfdda9d7SSteve Wise close = 1; 2708cfdda9d7SSteve Wise if (abrupt) 2709cfdda9d7SSteve Wise ep->com.state = ABORTING; 2710cfdda9d7SSteve Wise else { 2711cfdda9d7SSteve Wise ep->com.state = CLOSING; 2712ca5a2202SSteve Wise start_ep_timer(ep); 2713cfdda9d7SSteve Wise } 2714cfdda9d7SSteve Wise set_bit(CLOSE_SENT, &ep->com.flags); 2715cfdda9d7SSteve Wise break; 2716cfdda9d7SSteve Wise case CLOSING: 2717cfdda9d7SSteve Wise if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { 2718cfdda9d7SSteve Wise close = 1; 
2719cfdda9d7SSteve Wise if (abrupt) { 2720ca5a2202SSteve Wise stop_ep_timer(ep); 2721cfdda9d7SSteve Wise ep->com.state = ABORTING; 2722cfdda9d7SSteve Wise } else 2723cfdda9d7SSteve Wise ep->com.state = MORIBUND; 2724cfdda9d7SSteve Wise } 2725cfdda9d7SSteve Wise break; 2726cfdda9d7SSteve Wise case MORIBUND: 2727cfdda9d7SSteve Wise case ABORTING: 2728cfdda9d7SSteve Wise case DEAD: 2729cfdda9d7SSteve Wise PDBG("%s ignoring disconnect ep %p state %u\n", 2730cfdda9d7SSteve Wise __func__, ep, ep->com.state); 2731cfdda9d7SSteve Wise break; 2732cfdda9d7SSteve Wise default: 2733cfdda9d7SSteve Wise BUG(); 2734cfdda9d7SSteve Wise break; 2735cfdda9d7SSteve Wise } 2736cfdda9d7SSteve Wise 2737cfdda9d7SSteve Wise if (close) { 27388da7e7a5SSteve Wise if (abrupt) { 2739793dad94SVipul Pandya set_bit(EP_DISC_ABORT, &ep->com.history); 27408da7e7a5SSteve Wise close_complete_upcall(ep); 27418da7e7a5SSteve Wise ret = send_abort(ep, NULL, gfp); 2742793dad94SVipul Pandya } else { 2743793dad94SVipul Pandya set_bit(EP_DISC_CLOSE, &ep->com.history); 2744cfdda9d7SSteve Wise ret = send_halfclose(ep, gfp); 2745793dad94SVipul Pandya } 2746cfdda9d7SSteve Wise if (ret) 2747cfdda9d7SSteve Wise fatal = 1; 2748cfdda9d7SSteve Wise } 27498da7e7a5SSteve Wise mutex_unlock(&ep->com.mutex); 2750cfdda9d7SSteve Wise if (fatal) 2751cfdda9d7SSteve Wise release_ep_resources(ep); 2752cfdda9d7SSteve Wise return ret; 2753cfdda9d7SSteve Wise } 2754cfdda9d7SSteve Wise 27551cab775cSVipul Pandya static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 27561cab775cSVipul Pandya struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 27571cab775cSVipul Pandya { 27581cab775cSVipul Pandya struct c4iw_ep *ep; 2759793dad94SVipul Pandya int atid = be32_to_cpu(req->tid); 27601cab775cSVipul Pandya 27611cab775cSVipul Pandya ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, req->tid); 27621cab775cSVipul Pandya if (!ep) 27631cab775cSVipul Pandya return; 27641cab775cSVipul Pandya 27651cab775cSVipul Pandya switch 
(req->retval) { 27661cab775cSVipul Pandya case FW_ENOMEM: 2767793dad94SVipul Pandya set_bit(ACT_RETRY_NOMEM, &ep->com.history); 2768793dad94SVipul Pandya if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 2769793dad94SVipul Pandya send_fw_act_open_req(ep, atid); 2770793dad94SVipul Pandya return; 2771793dad94SVipul Pandya } 27721cab775cSVipul Pandya case FW_EADDRINUSE: 2773793dad94SVipul Pandya set_bit(ACT_RETRY_INUSE, &ep->com.history); 2774793dad94SVipul Pandya if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 2775793dad94SVipul Pandya send_fw_act_open_req(ep, atid); 2776793dad94SVipul Pandya return; 2777793dad94SVipul Pandya } 27781cab775cSVipul Pandya break; 27791cab775cSVipul Pandya default: 27801cab775cSVipul Pandya pr_info("%s unexpected ofld conn wr retval %d\n", 27811cab775cSVipul Pandya __func__, req->retval); 27821cab775cSVipul Pandya break; 27831cab775cSVipul Pandya } 2784793dad94SVipul Pandya pr_err("active ofld_connect_wr failure %d atid %d\n", 2785793dad94SVipul Pandya req->retval, atid); 2786793dad94SVipul Pandya mutex_lock(&dev->rdev.stats.lock); 2787793dad94SVipul Pandya dev->rdev.stats.act_ofld_conn_fails++; 2788793dad94SVipul Pandya mutex_unlock(&dev->rdev.stats.lock); 27891cab775cSVipul Pandya connect_reply_upcall(ep, status2errno(req->retval)); 2790793dad94SVipul Pandya state_set(&ep->com, DEAD); 2791793dad94SVipul Pandya remove_handle(dev, &dev->atid_idr, atid); 2792793dad94SVipul Pandya cxgb4_free_atid(dev->rdev.lldi.tids, atid); 2793793dad94SVipul Pandya dst_release(ep->dst); 2794793dad94SVipul Pandya cxgb4_l2t_release(ep->l2t); 2795793dad94SVipul Pandya c4iw_put_ep(&ep->com); 27961cab775cSVipul Pandya } 27971cab775cSVipul Pandya 27981cab775cSVipul Pandya static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 27991cab775cSVipul Pandya struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 28001cab775cSVipul Pandya { 28011cab775cSVipul Pandya struct sk_buff *rpl_skb; 28021cab775cSVipul Pandya struct cpl_pass_accept_req *cpl; 
28031cab775cSVipul Pandya int ret; 28041cab775cSVipul Pandya 28051cab775cSVipul Pandya rpl_skb = (struct sk_buff *)cpu_to_be64(req->cookie); 28061cab775cSVipul Pandya BUG_ON(!rpl_skb); 28071cab775cSVipul Pandya if (req->retval) { 28081cab775cSVipul Pandya PDBG("%s passive open failure %d\n", __func__, req->retval); 2809793dad94SVipul Pandya mutex_lock(&dev->rdev.stats.lock); 2810793dad94SVipul Pandya dev->rdev.stats.pas_ofld_conn_fails++; 2811793dad94SVipul Pandya mutex_unlock(&dev->rdev.stats.lock); 28121cab775cSVipul Pandya kfree_skb(rpl_skb); 28131cab775cSVipul Pandya } else { 28141cab775cSVipul Pandya cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb); 28151cab775cSVipul Pandya OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 28161cab775cSVipul Pandya htonl(req->tid))); 28171cab775cSVipul Pandya ret = pass_accept_req(dev, rpl_skb); 28181cab775cSVipul Pandya if (!ret) 28191cab775cSVipul Pandya kfree_skb(rpl_skb); 28201cab775cSVipul Pandya } 28211cab775cSVipul Pandya return; 28221cab775cSVipul Pandya } 28231cab775cSVipul Pandya 28241cab775cSVipul Pandya static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) 28252f5b48c3SSteve Wise { 28262f5b48c3SSteve Wise struct cpl_fw6_msg *rpl = cplhdr(skb); 28271cab775cSVipul Pandya struct cpl_fw6_msg_ofld_connection_wr_rpl *req; 28281cab775cSVipul Pandya 28291cab775cSVipul Pandya switch (rpl->type) { 28301cab775cSVipul Pandya case FW6_TYPE_CQE: 28312f5b48c3SSteve Wise c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); 28321cab775cSVipul Pandya break; 28331cab775cSVipul Pandya case FW6_TYPE_OFLD_CONNECTION_WR_RPL: 28341cab775cSVipul Pandya req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data; 28351cab775cSVipul Pandya switch (req->t_state) { 28361cab775cSVipul Pandya case TCP_SYN_SENT: 28371cab775cSVipul Pandya active_ofld_conn_reply(dev, skb, req); 28381cab775cSVipul Pandya break; 28391cab775cSVipul Pandya case TCP_SYN_RECV: 28401cab775cSVipul Pandya passive_ofld_conn_reply(dev, skb, 
req); 28411cab775cSVipul Pandya break; 28421cab775cSVipul Pandya default: 28431cab775cSVipul Pandya pr_err("%s unexpected ofld conn wr state %d\n", 28441cab775cSVipul Pandya __func__, req->t_state); 28451cab775cSVipul Pandya break; 28461cab775cSVipul Pandya } 28471cab775cSVipul Pandya break; 28481cab775cSVipul Pandya } 28491cab775cSVipul Pandya return 0; 28501cab775cSVipul Pandya } 28511cab775cSVipul Pandya 28521cab775cSVipul Pandya static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos) 28531cab775cSVipul Pandya { 28541cab775cSVipul Pandya u32 l2info; 28551cab775cSVipul Pandya u16 vlantag, len, hdr_len; 28561cab775cSVipul Pandya u8 intf; 28571cab775cSVipul Pandya struct cpl_rx_pkt *cpl = cplhdr(skb); 28581cab775cSVipul Pandya struct cpl_pass_accept_req *req; 28591cab775cSVipul Pandya struct tcp_options_received tmp_opt; 28601cab775cSVipul Pandya 28611cab775cSVipul Pandya /* Store values from cpl_rx_pkt in temporary location. */ 28621cab775cSVipul Pandya vlantag = cpl->vlan; 28631cab775cSVipul Pandya len = cpl->len; 28641cab775cSVipul Pandya l2info = cpl->l2info; 28651cab775cSVipul Pandya hdr_len = cpl->hdr_len; 28661cab775cSVipul Pandya intf = cpl->iff; 28671cab775cSVipul Pandya 28681cab775cSVipul Pandya __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header)); 28691cab775cSVipul Pandya 28701cab775cSVipul Pandya /* 28711cab775cSVipul Pandya * We need to parse the TCP options from SYN packet. 28721cab775cSVipul Pandya * to generate cpl_pass_accept_req. 
28731cab775cSVipul Pandya */ 28741cab775cSVipul Pandya memset(&tmp_opt, 0, sizeof(tmp_opt)); 28751cab775cSVipul Pandya tcp_clear_options(&tmp_opt); 28761cab775cSVipul Pandya tcp_parse_options(skb, &tmp_opt, 0, 0, NULL); 28771cab775cSVipul Pandya 28781cab775cSVipul Pandya req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req)); 28791cab775cSVipul Pandya memset(req, 0, sizeof(*req)); 28801cab775cSVipul Pandya req->l2info = cpu_to_be16(V_SYN_INTF(intf) | 28811cab775cSVipul Pandya V_SYN_MAC_IDX(G_RX_MACIDX(htonl(l2info))) | 28821cab775cSVipul Pandya F_SYN_XACT_MATCH); 28831cab775cSVipul Pandya req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(htonl(l2info))) | 28841cab775cSVipul Pandya V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(htons(hdr_len))) | 28851cab775cSVipul Pandya V_IP_HDR_LEN(G_RX_IPHDR_LEN(htons(hdr_len))) | 28861cab775cSVipul Pandya V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(htonl(l2info)))); 28871cab775cSVipul Pandya req->vlan = vlantag; 28881cab775cSVipul Pandya req->len = len; 28891cab775cSVipul Pandya req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) | 28901cab775cSVipul Pandya PASS_OPEN_TOS(tos)); 28911cab775cSVipul Pandya req->tcpopt.mss = htons(tmp_opt.mss_clamp); 28921cab775cSVipul Pandya if (tmp_opt.wscale_ok) 28931cab775cSVipul Pandya req->tcpopt.wsf = tmp_opt.snd_wscale; 28941cab775cSVipul Pandya req->tcpopt.tstamp = tmp_opt.saw_tstamp; 28951cab775cSVipul Pandya if (tmp_opt.sack_ok) 28961cab775cSVipul Pandya req->tcpopt.sack = 1; 28971cab775cSVipul Pandya OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0)); 28981cab775cSVipul Pandya return; 28991cab775cSVipul Pandya } 29001cab775cSVipul Pandya 29011cab775cSVipul Pandya static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, 29021cab775cSVipul Pandya __be32 laddr, __be16 lport, 29031cab775cSVipul Pandya __be32 raddr, __be16 rport, 29041cab775cSVipul Pandya u32 rcv_isn, u32 filter, u16 window, 29051cab775cSVipul Pandya u32 rss_qid, u8 port_id) 29061cab775cSVipul Pandya { 
29071cab775cSVipul Pandya struct sk_buff *req_skb; 29081cab775cSVipul Pandya struct fw_ofld_connection_wr *req; 29091cab775cSVipul Pandya struct cpl_pass_accept_req *cpl = cplhdr(skb); 29101cab775cSVipul Pandya 29111cab775cSVipul Pandya req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL); 29121cab775cSVipul Pandya req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req)); 29131cab775cSVipul Pandya memset(req, 0, sizeof(*req)); 29141cab775cSVipul Pandya req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1)); 29151cab775cSVipul Pandya req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); 29161cab775cSVipul Pandya req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL); 29171cab775cSVipul Pandya req->le.filter = filter; 29181cab775cSVipul Pandya req->le.lport = lport; 29191cab775cSVipul Pandya req->le.pport = rport; 29201cab775cSVipul Pandya req->le.u.ipv4.lip = laddr; 29211cab775cSVipul Pandya req->le.u.ipv4.pip = raddr; 29221cab775cSVipul Pandya req->tcb.rcv_nxt = htonl(rcv_isn + 1); 29231cab775cSVipul Pandya req->tcb.rcv_adv = htons(window); 29241cab775cSVipul Pandya req->tcb.t_state_to_astid = 29251cab775cSVipul Pandya htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) | 29261cab775cSVipul Pandya V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) | 29271cab775cSVipul Pandya V_FW_OFLD_CONNECTION_WR_ASTID( 29281cab775cSVipul Pandya GET_PASS_OPEN_TID(ntohl(cpl->tos_stid)))); 29291cab775cSVipul Pandya 29301cab775cSVipul Pandya /* 29311cab775cSVipul Pandya * We store the qid in opt2 which will be used by the firmware 29321cab775cSVipul Pandya * to send us the wr response. 29331cab775cSVipul Pandya */ 29341cab775cSVipul Pandya req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid)); 29351cab775cSVipul Pandya 29361cab775cSVipul Pandya /* 29371cab775cSVipul Pandya * We initialize the MSS index in TCB to 0xF. 
29381cab775cSVipul Pandya * So that when driver sends cpl_pass_accept_rpl 29391cab775cSVipul Pandya * TCB picks up the correct value. If this was 0 29401cab775cSVipul Pandya * TP will ignore any value > 0 for MSS index. 29411cab775cSVipul Pandya */ 29421cab775cSVipul Pandya req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF)); 29431cab775cSVipul Pandya req->cookie = cpu_to_be64((u64)skb); 29441cab775cSVipul Pandya 29451cab775cSVipul Pandya set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id); 29461cab775cSVipul Pandya cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb); 29471cab775cSVipul Pandya } 29481cab775cSVipul Pandya 29491cab775cSVipul Pandya /* 29501cab775cSVipul Pandya * Handler for CPL_RX_PKT message. Need to handle cpl_rx_pkt 29511cab775cSVipul Pandya * messages when a filter is being used instead of server to 29521cab775cSVipul Pandya * redirect a syn packet. When packets hit filter they are redirected 29531cab775cSVipul Pandya * to the offload queue and driver tries to establish the connection 29541cab775cSVipul Pandya * using firmware work request. 
29551cab775cSVipul Pandya */ 29561cab775cSVipul Pandya static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) 29571cab775cSVipul Pandya { 29581cab775cSVipul Pandya int stid; 29591cab775cSVipul Pandya unsigned int filter; 29601cab775cSVipul Pandya struct ethhdr *eh = NULL; 29611cab775cSVipul Pandya struct vlan_ethhdr *vlan_eh = NULL; 29621cab775cSVipul Pandya struct iphdr *iph; 29631cab775cSVipul Pandya struct tcphdr *tcph; 29641cab775cSVipul Pandya struct rss_header *rss = (void *)skb->data; 29651cab775cSVipul Pandya struct cpl_rx_pkt *cpl = (void *)skb->data; 29661cab775cSVipul Pandya struct cpl_pass_accept_req *req = (void *)(rss + 1); 29671cab775cSVipul Pandya struct l2t_entry *e; 29681cab775cSVipul Pandya struct dst_entry *dst; 29691cab775cSVipul Pandya struct rtable *rt; 29701cab775cSVipul Pandya struct c4iw_ep *lep; 29711cab775cSVipul Pandya u16 window; 29721cab775cSVipul Pandya struct port_info *pi; 29731cab775cSVipul Pandya struct net_device *pdev; 29741cab775cSVipul Pandya u16 rss_qid; 29751cab775cSVipul Pandya int step; 29761cab775cSVipul Pandya u32 tx_chan; 29771cab775cSVipul Pandya struct neighbour *neigh; 29781cab775cSVipul Pandya 29791cab775cSVipul Pandya /* Drop all non-SYN packets */ 29801cab775cSVipul Pandya if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN))) 29811cab775cSVipul Pandya goto reject; 29821cab775cSVipul Pandya 29831cab775cSVipul Pandya /* 29841cab775cSVipul Pandya * Drop all packets which did not hit the filter. 29851cab775cSVipul Pandya * Unlikely to happen. 29861cab775cSVipul Pandya */ 29871cab775cSVipul Pandya if (!(rss->filter_hit && rss->filter_tid)) 29881cab775cSVipul Pandya goto reject; 29891cab775cSVipul Pandya 29901cab775cSVipul Pandya /* 29911cab775cSVipul Pandya * Calculate the server tid from filter hit index from cpl_rx_pkt. 
29921cab775cSVipul Pandya */ 29931cab775cSVipul Pandya stid = cpu_to_be32(rss->hash_val) - dev->rdev.lldi.tids->sftid_base 29941cab775cSVipul Pandya + dev->rdev.lldi.tids->nstids; 29951cab775cSVipul Pandya 29961cab775cSVipul Pandya lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid); 29971cab775cSVipul Pandya if (!lep) { 29981cab775cSVipul Pandya PDBG("%s connect request on invalid stid %d\n", __func__, stid); 29991cab775cSVipul Pandya goto reject; 30001cab775cSVipul Pandya } 30011cab775cSVipul Pandya 30021cab775cSVipul Pandya if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) { 30031cab775cSVipul Pandya eh = (struct ethhdr *)(req + 1); 30041cab775cSVipul Pandya iph = (struct iphdr *)(eh + 1); 30051cab775cSVipul Pandya } else { 30061cab775cSVipul Pandya vlan_eh = (struct vlan_ethhdr *)(req + 1); 30071cab775cSVipul Pandya iph = (struct iphdr *)(vlan_eh + 1); 30081cab775cSVipul Pandya skb->vlan_tci = ntohs(cpl->vlan); 30091cab775cSVipul Pandya } 30101cab775cSVipul Pandya 30111cab775cSVipul Pandya if (iph->version != 0x4) 30121cab775cSVipul Pandya goto reject; 30131cab775cSVipul Pandya 30141cab775cSVipul Pandya tcph = (struct tcphdr *)(iph + 1); 30151cab775cSVipul Pandya skb_set_network_header(skb, (void *)iph - (void *)rss); 30161cab775cSVipul Pandya skb_set_transport_header(skb, (void *)tcph - (void *)rss); 30171cab775cSVipul Pandya skb_get(skb); 30181cab775cSVipul Pandya 30191cab775cSVipul Pandya PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__, 30201cab775cSVipul Pandya ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr), 30211cab775cSVipul Pandya ntohs(tcph->source), iph->tos); 30221cab775cSVipul Pandya 30231cab775cSVipul Pandya rt = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source, 30241cab775cSVipul Pandya iph->tos); 30251cab775cSVipul Pandya if (!rt) { 30261cab775cSVipul Pandya pr_err("%s - failed to find dst entry!\n", 30271cab775cSVipul Pandya __func__); 30281cab775cSVipul Pandya goto reject; 
30291cab775cSVipul Pandya } 30301cab775cSVipul Pandya dst = &rt->dst; 30311cab775cSVipul Pandya neigh = dst_neigh_lookup_skb(dst, skb); 30321cab775cSVipul Pandya 30331cab775cSVipul Pandya if (neigh->dev->flags & IFF_LOOPBACK) { 30341cab775cSVipul Pandya pdev = ip_dev_find(&init_net, iph->daddr); 30351cab775cSVipul Pandya e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, 30361cab775cSVipul Pandya pdev, 0); 30371cab775cSVipul Pandya pi = (struct port_info *)netdev_priv(pdev); 30381cab775cSVipul Pandya tx_chan = cxgb4_port_chan(pdev); 30391cab775cSVipul Pandya dev_put(pdev); 30401cab775cSVipul Pandya } else { 30411cab775cSVipul Pandya e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, 30421cab775cSVipul Pandya neigh->dev, 0); 30431cab775cSVipul Pandya pi = (struct port_info *)netdev_priv(neigh->dev); 30441cab775cSVipul Pandya tx_chan = cxgb4_port_chan(neigh->dev); 30451cab775cSVipul Pandya } 30461cab775cSVipul Pandya if (!e) { 30471cab775cSVipul Pandya pr_err("%s - failed to allocate l2t entry!\n", 30481cab775cSVipul Pandya __func__); 30491cab775cSVipul Pandya goto free_dst; 30501cab775cSVipul Pandya } 30511cab775cSVipul Pandya 30521cab775cSVipul Pandya step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; 30531cab775cSVipul Pandya rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step]; 30541cab775cSVipul Pandya window = htons(tcph->window); 30551cab775cSVipul Pandya 30561cab775cSVipul Pandya /* Calcuate filter portion for LE region. */ 30571cab775cSVipul Pandya filter = cpu_to_be32(select_ntuple(dev, dst, e)); 30581cab775cSVipul Pandya 30591cab775cSVipul Pandya /* 30601cab775cSVipul Pandya * Synthesize the cpl_pass_accept_req. We have everything except the 30611cab775cSVipul Pandya * TID. Once firmware sends a reply with TID we update the TID field 30621cab775cSVipul Pandya * in cpl and pass it through the regular cpl_pass_accept_req path. 
 */
	build_cpl_pass_accept_req(skb, stid, iph->tos);
	send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
			      tcph->source, ntohl(tcph->seq), filter, window,
			      rss_qid, pi->port_id);
	/* L2T entry no longer needed once the FW open request is sent. */
	cxgb4_l2t_release(e);
free_dst:
	dst_release(dst);
reject:
	return 0;
}

/*
 * These are the real handlers that are called from a
 * work queue.
 *
 * Indexed by CPL opcode; process_work() dispatches each queued skb
 * through this table.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack,
	[CPL_FW6_MSG] = deferred_fw6_msg,
	[CPL_RX_PKT] = rx_pkt
};

/*
 * Handle one timed-out endpoint (called from process_timedout_eps()).
 *
 * Under ep->com.mutex, decide from the connection state whether the
 * timeout warrants aborting the connection: MPA_REQ_SENT additionally
 * delivers a -ETIMEDOUT connect reply upcall; CLOSING/MORIBUND first
 * move the QP to ERROR.  Any other state is unexpected (WARN) and is
 * not aborted.  Finally drops a reference on the endpoint —
 * NOTE(review): presumably the reference held on behalf of the armed
 * timer; confirm against the code that starts the timer.
 */
static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	mutex_lock(&ep->com.mutex);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	set_bit(TIMEDOUT, &ep->com.history);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		break;
	default:
		WARN(1, "%s unexpected state ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, ep->com.state);
		abort = 0;
	}
	mutex_unlock(&ep->com.mutex);
	/* abort_connection() is called outside the mutex. */
	if (abort)
		abort_connection(ep, NULL, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
}

/*
 * Drain the global timeout_list, handling each endpoint in turn.
 *
 * timeout_lock is dropped around each process_timeout() call (which
 * sleeps: it takes a mutex and may call into QP modify), then
 * re-taken to fetch the next entry.  The list head is re-checked each
 * iteration since entries may be added while the lock is dropped.
 */
static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}

/*
 * Work function: dispatch every queued CPL message, then handle any
 * endpoints whose timers have fired.
 *
 * rpl is declared as cpl_act_establish but only the common CPL header
 * field ot.opcode is read, so any CPL message type works here.  The
 * c4iw_dev pointer was stashed in skb->cb by sched().  A handler
 * returning 0 means it did not consume the skb, so it is freed here;
 * non-zero means the handler kept ownership.
 */
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
	}
	process_timedout_eps();
}

static DECLARE_WORK(skb_work, process_work);

/*
 * Endpoint timer callback: queue the endpoint on timeout_list and
 * kick the worker, which will call process_timeout() for it.
 * Runs in timer (softirq) context, hence spin_lock rather than
 * spin_lock_irq here.
 */
static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;

	spin_lock(&timeout_lock);
	list_add_tail(&ep->entry, &timeout_list);
	spin_unlock(&timeout_lock);
	queue_work(workq, &skb_work);
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{

	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}

/*
 * Handle a SET_TCB reply directly in interrupt context: just log an
 * unexpected status and free the skb.  Always returns 0 (consumed).
 */
static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	kfree_skb(skb);
	return 0;
}

/*
 * Demultiplex FW6 firmware messages.
 *
 * FW6_TYPE_WR_RPL completions are handled inline: the completion
 * status and the waiter pointer are unpacked from rpl->data[] and the
 * waiter (if any) is woken with 0 on success or a negated error.
 * FW6_TYPE_CQE and FW6_TYPE_OFLD_CONNECTION_WR_RPL are deferred to
 * the work queue via sched() (handled there by deferred_fw6_msg), so
 * the skb must NOT be freed here in those cases.  Unknown types are
 * logged and dropped.
 */
static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case FW6_TYPE_WR_RPL:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp)
			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
		kfree_skb(skb);
		break;
	case FW6_TYPE_CQE:
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		sched(dev, skb);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
		       rpl->type);
		kfree_skb(skb);
		break;
	}
	return 0;
}

/*
 * Interrupt-context preamble for peer aborts.
 *
 * Aborts for unknown tids and negative-advice aborts are dropped
 * here.  Otherwise any thread blocked in rdma_init()/rdma_fini() on
 * this endpoint is woken with -ECONNRESET immediately (before the
 * deferred peer_abort() runs from the work queue), and the skb is
 * handed to sched() for the full abort processing.
 */
static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (!ep) {
		printk(KERN_WARNING MOD
		       "Abort on non-existent endpoint, tid %d\n", tid);
		kfree_skb(skb);
		return 0;
	}
	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
		     ep->hwtid);
		kfree_skb(skb);
		return 0;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 */
	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
	sched(dev, skb);
	return 0;
}

/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 *
 * Exceptions handled directly in interrupt context:
 * SET_TCB_RPL, FW6_MSG, and the peer-abort preamble above.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = peer_abort_intr,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg,
	[CPL_RX_PKT] = sched
};

/*
 * Module-init setup for the CM: initialize the timeout lock and the
 * rx queue, and create the single-threaded work queue that serializes
 * all CM event processing.  Returns 0 or -ENOMEM.
 */
int __init c4iw_cm_init(void)
{
	spin_lock_init(&timeout_lock);
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb4");
	if (!workq)
		return -ENOMEM;

	return 0;
}

/*
 * Module-exit teardown: all endpoint timers should have been
 * cancelled by now (hence the WARN_ON on a non-empty timeout_list);
 * flush any remaining queued work, then destroy the work queue.
 */
void __exit c4iw_cm_term(void)
{
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(workq);
	destroy_workqueue(workq);
}