/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
31cfdda9d7SSteve Wise */ 32cfdda9d7SSteve Wise #include <linux/module.h> 33cfdda9d7SSteve Wise #include <linux/list.h> 34cfdda9d7SSteve Wise #include <linux/workqueue.h> 35cfdda9d7SSteve Wise #include <linux/skbuff.h> 36cfdda9d7SSteve Wise #include <linux/timer.h> 37cfdda9d7SSteve Wise #include <linux/notifier.h> 38cfdda9d7SSteve Wise #include <linux/inetdevice.h> 39cfdda9d7SSteve Wise #include <linux/ip.h> 40cfdda9d7SSteve Wise #include <linux/tcp.h> 41cfdda9d7SSteve Wise 42cfdda9d7SSteve Wise #include <net/neighbour.h> 43cfdda9d7SSteve Wise #include <net/netevent.h> 44cfdda9d7SSteve Wise #include <net/route.h> 45cfdda9d7SSteve Wise 46cfdda9d7SSteve Wise #include "iw_cxgb4.h" 47cfdda9d7SSteve Wise 48cfdda9d7SSteve Wise static char *states[] = { 49cfdda9d7SSteve Wise "idle", 50cfdda9d7SSteve Wise "listen", 51cfdda9d7SSteve Wise "connecting", 52cfdda9d7SSteve Wise "mpa_wait_req", 53cfdda9d7SSteve Wise "mpa_req_sent", 54cfdda9d7SSteve Wise "mpa_req_rcvd", 55cfdda9d7SSteve Wise "mpa_rep_sent", 56cfdda9d7SSteve Wise "fpdu_mode", 57cfdda9d7SSteve Wise "aborting", 58cfdda9d7SSteve Wise "closing", 59cfdda9d7SSteve Wise "moribund", 60cfdda9d7SSteve Wise "dead", 61cfdda9d7SSteve Wise NULL, 62cfdda9d7SSteve Wise }; 63cfdda9d7SSteve Wise 645be78ee9SVipul Pandya static int nocong; 655be78ee9SVipul Pandya module_param(nocong, int, 0644); 665be78ee9SVipul Pandya MODULE_PARM_DESC(nocong, "Turn of congestion control (default=0)"); 675be78ee9SVipul Pandya 685be78ee9SVipul Pandya static int enable_ecn; 695be78ee9SVipul Pandya module_param(enable_ecn, int, 0644); 705be78ee9SVipul Pandya MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)"); 715be78ee9SVipul Pandya 72b52fe09eSSteve Wise static int dack_mode = 1; 73ba6d3925SSteve Wise module_param(dack_mode, int, 0644); 74b52fe09eSSteve Wise MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)"); 75ba6d3925SSteve Wise 76be4c9badSRoland Dreier int c4iw_max_read_depth = 8; 77be4c9badSRoland Dreier 
module_param(c4iw_max_read_depth, int, 0644); 78be4c9badSRoland Dreier MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)"); 79be4c9badSRoland Dreier 80cfdda9d7SSteve Wise static int enable_tcp_timestamps; 81cfdda9d7SSteve Wise module_param(enable_tcp_timestamps, int, 0644); 82cfdda9d7SSteve Wise MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)"); 83cfdda9d7SSteve Wise 84cfdda9d7SSteve Wise static int enable_tcp_sack; 85cfdda9d7SSteve Wise module_param(enable_tcp_sack, int, 0644); 86cfdda9d7SSteve Wise MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)"); 87cfdda9d7SSteve Wise 88cfdda9d7SSteve Wise static int enable_tcp_window_scaling = 1; 89cfdda9d7SSteve Wise module_param(enable_tcp_window_scaling, int, 0644); 90cfdda9d7SSteve Wise MODULE_PARM_DESC(enable_tcp_window_scaling, 91cfdda9d7SSteve Wise "Enable tcp window scaling (default=1)"); 92cfdda9d7SSteve Wise 93cfdda9d7SSteve Wise int c4iw_debug; 94cfdda9d7SSteve Wise module_param(c4iw_debug, int, 0644); 95cfdda9d7SSteve Wise MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)"); 96cfdda9d7SSteve Wise 97cfdda9d7SSteve Wise static int peer2peer; 98cfdda9d7SSteve Wise module_param(peer2peer, int, 0644); 99cfdda9d7SSteve Wise MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)"); 100cfdda9d7SSteve Wise 101cfdda9d7SSteve Wise static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; 102cfdda9d7SSteve Wise module_param(p2p_type, int, 0644); 103cfdda9d7SSteve Wise MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: " 104cfdda9d7SSteve Wise "1=RDMA_READ 0=RDMA_WRITE (default 1)"); 105cfdda9d7SSteve Wise 106cfdda9d7SSteve Wise static int ep_timeout_secs = 60; 107cfdda9d7SSteve Wise module_param(ep_timeout_secs, int, 0644); 108cfdda9d7SSteve Wise MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout " 109cfdda9d7SSteve Wise "in seconds (default=60)"); 110cfdda9d7SSteve Wise 111cfdda9d7SSteve Wise static int mpa_rev 
= 1; 112cfdda9d7SSteve Wise module_param(mpa_rev, int, 0644); 113cfdda9d7SSteve Wise MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, " 114d2fe99e8SKumar Sanghvi "1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft" 115d2fe99e8SKumar Sanghvi " compliant (default=1)"); 116cfdda9d7SSteve Wise 117cfdda9d7SSteve Wise static int markers_enabled; 118cfdda9d7SSteve Wise module_param(markers_enabled, int, 0644); 119cfdda9d7SSteve Wise MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)"); 120cfdda9d7SSteve Wise 121cfdda9d7SSteve Wise static int crc_enabled = 1; 122cfdda9d7SSteve Wise module_param(crc_enabled, int, 0644); 123cfdda9d7SSteve Wise MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)"); 124cfdda9d7SSteve Wise 125cfdda9d7SSteve Wise static int rcv_win = 256 * 1024; 126cfdda9d7SSteve Wise module_param(rcv_win, int, 0644); 127cfdda9d7SSteve Wise MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)"); 128cfdda9d7SSteve Wise 12998ae68b7SSteve Wise static int snd_win = 128 * 1024; 130cfdda9d7SSteve Wise module_param(snd_win, int, 0644); 13198ae68b7SSteve Wise MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)"); 132cfdda9d7SSteve Wise 133cfdda9d7SSteve Wise static struct workqueue_struct *workq; 134cfdda9d7SSteve Wise 135cfdda9d7SSteve Wise static struct sk_buff_head rxq; 136cfdda9d7SSteve Wise 137cfdda9d7SSteve Wise static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp); 138cfdda9d7SSteve Wise static void ep_timeout(unsigned long arg); 139cfdda9d7SSteve Wise static void connect_reply_upcall(struct c4iw_ep *ep, int status); 140cfdda9d7SSteve Wise 141be4c9badSRoland Dreier static LIST_HEAD(timeout_list); 142be4c9badSRoland Dreier static spinlock_t timeout_lock; 143be4c9badSRoland Dreier 144cfdda9d7SSteve Wise static void start_ep_timer(struct c4iw_ep *ep) 145cfdda9d7SSteve Wise { 146cfdda9d7SSteve Wise PDBG("%s ep %p\n", __func__, ep); 147cfdda9d7SSteve 
Wise if (timer_pending(&ep->timer)) { 148cfdda9d7SSteve Wise PDBG("%s stopped / restarted timer ep %p\n", __func__, ep); 149cfdda9d7SSteve Wise del_timer_sync(&ep->timer); 150cfdda9d7SSteve Wise } else 151cfdda9d7SSteve Wise c4iw_get_ep(&ep->com); 152cfdda9d7SSteve Wise ep->timer.expires = jiffies + ep_timeout_secs * HZ; 153cfdda9d7SSteve Wise ep->timer.data = (unsigned long)ep; 154cfdda9d7SSteve Wise ep->timer.function = ep_timeout; 155cfdda9d7SSteve Wise add_timer(&ep->timer); 156cfdda9d7SSteve Wise } 157cfdda9d7SSteve Wise 158cfdda9d7SSteve Wise static void stop_ep_timer(struct c4iw_ep *ep) 159cfdda9d7SSteve Wise { 160cfdda9d7SSteve Wise PDBG("%s ep %p\n", __func__, ep); 161cfdda9d7SSteve Wise if (!timer_pending(&ep->timer)) { 16276f267b7SJulia Lawall WARN(1, "%s timer stopped when its not running! " 163cfdda9d7SSteve Wise "ep %p state %u\n", __func__, ep, ep->com.state); 164cfdda9d7SSteve Wise return; 165cfdda9d7SSteve Wise } 166cfdda9d7SSteve Wise del_timer_sync(&ep->timer); 167cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 168cfdda9d7SSteve Wise } 169cfdda9d7SSteve Wise 170cfdda9d7SSteve Wise static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb, 171cfdda9d7SSteve Wise struct l2t_entry *l2e) 172cfdda9d7SSteve Wise { 173cfdda9d7SSteve Wise int error = 0; 174cfdda9d7SSteve Wise 175cfdda9d7SSteve Wise if (c4iw_fatal_error(rdev)) { 176cfdda9d7SSteve Wise kfree_skb(skb); 177cfdda9d7SSteve Wise PDBG("%s - device in error state - dropping\n", __func__); 178cfdda9d7SSteve Wise return -EIO; 179cfdda9d7SSteve Wise } 180cfdda9d7SSteve Wise error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e); 181cfdda9d7SSteve Wise if (error < 0) 182cfdda9d7SSteve Wise kfree_skb(skb); 18374594861SSteve Wise return error < 0 ? 
error : 0; 184cfdda9d7SSteve Wise } 185cfdda9d7SSteve Wise 186cfdda9d7SSteve Wise int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb) 187cfdda9d7SSteve Wise { 188cfdda9d7SSteve Wise int error = 0; 189cfdda9d7SSteve Wise 190cfdda9d7SSteve Wise if (c4iw_fatal_error(rdev)) { 191cfdda9d7SSteve Wise kfree_skb(skb); 192cfdda9d7SSteve Wise PDBG("%s - device in error state - dropping\n", __func__); 193cfdda9d7SSteve Wise return -EIO; 194cfdda9d7SSteve Wise } 195cfdda9d7SSteve Wise error = cxgb4_ofld_send(rdev->lldi.ports[0], skb); 196cfdda9d7SSteve Wise if (error < 0) 197cfdda9d7SSteve Wise kfree_skb(skb); 19874594861SSteve Wise return error < 0 ? error : 0; 199cfdda9d7SSteve Wise } 200cfdda9d7SSteve Wise 201cfdda9d7SSteve Wise static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb) 202cfdda9d7SSteve Wise { 203cfdda9d7SSteve Wise struct cpl_tid_release *req; 204cfdda9d7SSteve Wise 205cfdda9d7SSteve Wise skb = get_skb(skb, sizeof *req, GFP_KERNEL); 206cfdda9d7SSteve Wise if (!skb) 207cfdda9d7SSteve Wise return; 208cfdda9d7SSteve Wise req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req)); 209cfdda9d7SSteve Wise INIT_TP_WR(req, hwtid); 210cfdda9d7SSteve Wise OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); 211cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); 212cfdda9d7SSteve Wise c4iw_ofld_send(rdev, skb); 213cfdda9d7SSteve Wise return; 214cfdda9d7SSteve Wise } 215cfdda9d7SSteve Wise 216cfdda9d7SSteve Wise static void set_emss(struct c4iw_ep *ep, u16 opt) 217cfdda9d7SSteve Wise { 218cfdda9d7SSteve Wise ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40; 219cfdda9d7SSteve Wise ep->mss = ep->emss; 220cfdda9d7SSteve Wise if (GET_TCPOPT_TSTAMP(opt)) 221cfdda9d7SSteve Wise ep->emss -= 12; 222cfdda9d7SSteve Wise if (ep->emss < 128) 223cfdda9d7SSteve Wise ep->emss = 128; 224cfdda9d7SSteve Wise PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt), 225cfdda9d7SSteve Wise ep->mss, 
ep->emss); 226cfdda9d7SSteve Wise } 227cfdda9d7SSteve Wise 228cfdda9d7SSteve Wise static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc) 229cfdda9d7SSteve Wise { 230cfdda9d7SSteve Wise enum c4iw_ep_state state; 231cfdda9d7SSteve Wise 2322f5b48c3SSteve Wise mutex_lock(&epc->mutex); 233cfdda9d7SSteve Wise state = epc->state; 2342f5b48c3SSteve Wise mutex_unlock(&epc->mutex); 235cfdda9d7SSteve Wise return state; 236cfdda9d7SSteve Wise } 237cfdda9d7SSteve Wise 238cfdda9d7SSteve Wise static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) 239cfdda9d7SSteve Wise { 240cfdda9d7SSteve Wise epc->state = new; 241cfdda9d7SSteve Wise } 242cfdda9d7SSteve Wise 243cfdda9d7SSteve Wise static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) 244cfdda9d7SSteve Wise { 2452f5b48c3SSteve Wise mutex_lock(&epc->mutex); 246cfdda9d7SSteve Wise PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]); 247cfdda9d7SSteve Wise __state_set(epc, new); 2482f5b48c3SSteve Wise mutex_unlock(&epc->mutex); 249cfdda9d7SSteve Wise return; 250cfdda9d7SSteve Wise } 251cfdda9d7SSteve Wise 252cfdda9d7SSteve Wise static void *alloc_ep(int size, gfp_t gfp) 253cfdda9d7SSteve Wise { 254cfdda9d7SSteve Wise struct c4iw_ep_common *epc; 255cfdda9d7SSteve Wise 256cfdda9d7SSteve Wise epc = kzalloc(size, gfp); 257cfdda9d7SSteve Wise if (epc) { 258cfdda9d7SSteve Wise kref_init(&epc->kref); 2592f5b48c3SSteve Wise mutex_init(&epc->mutex); 260aadc4df3SSteve Wise c4iw_init_wr_wait(&epc->wr_wait); 261cfdda9d7SSteve Wise } 262cfdda9d7SSteve Wise PDBG("%s alloc ep %p\n", __func__, epc); 263cfdda9d7SSteve Wise return epc; 264cfdda9d7SSteve Wise } 265cfdda9d7SSteve Wise 266cfdda9d7SSteve Wise void _c4iw_free_ep(struct kref *kref) 267cfdda9d7SSteve Wise { 268cfdda9d7SSteve Wise struct c4iw_ep *ep; 269cfdda9d7SSteve Wise 270cfdda9d7SSteve Wise ep = container_of(kref, struct c4iw_ep, com.kref); 271cfdda9d7SSteve Wise PDBG("%s ep %p state %s\n", __func__, ep, 
states[state_read(&ep->com)]); 272cfdda9d7SSteve Wise if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { 273cfdda9d7SSteve Wise cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); 274cfdda9d7SSteve Wise dst_release(ep->dst); 275cfdda9d7SSteve Wise cxgb4_l2t_release(ep->l2t); 276cfdda9d7SSteve Wise } 277cfdda9d7SSteve Wise kfree(ep); 278cfdda9d7SSteve Wise } 279cfdda9d7SSteve Wise 280cfdda9d7SSteve Wise static void release_ep_resources(struct c4iw_ep *ep) 281cfdda9d7SSteve Wise { 282cfdda9d7SSteve Wise set_bit(RELEASE_RESOURCES, &ep->com.flags); 283cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 284cfdda9d7SSteve Wise } 285cfdda9d7SSteve Wise 286cfdda9d7SSteve Wise static int status2errno(int status) 287cfdda9d7SSteve Wise { 288cfdda9d7SSteve Wise switch (status) { 289cfdda9d7SSteve Wise case CPL_ERR_NONE: 290cfdda9d7SSteve Wise return 0; 291cfdda9d7SSteve Wise case CPL_ERR_CONN_RESET: 292cfdda9d7SSteve Wise return -ECONNRESET; 293cfdda9d7SSteve Wise case CPL_ERR_ARP_MISS: 294cfdda9d7SSteve Wise return -EHOSTUNREACH; 295cfdda9d7SSteve Wise case CPL_ERR_CONN_TIMEDOUT: 296cfdda9d7SSteve Wise return -ETIMEDOUT; 297cfdda9d7SSteve Wise case CPL_ERR_TCAM_FULL: 298cfdda9d7SSteve Wise return -ENOMEM; 299cfdda9d7SSteve Wise case CPL_ERR_CONN_EXIST: 300cfdda9d7SSteve Wise return -EADDRINUSE; 301cfdda9d7SSteve Wise default: 302cfdda9d7SSteve Wise return -EIO; 303cfdda9d7SSteve Wise } 304cfdda9d7SSteve Wise } 305cfdda9d7SSteve Wise 306cfdda9d7SSteve Wise /* 307cfdda9d7SSteve Wise * Try and reuse skbs already allocated... 
308cfdda9d7SSteve Wise */ 309cfdda9d7SSteve Wise static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp) 310cfdda9d7SSteve Wise { 311cfdda9d7SSteve Wise if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) { 312cfdda9d7SSteve Wise skb_trim(skb, 0); 313cfdda9d7SSteve Wise skb_get(skb); 314cfdda9d7SSteve Wise skb_reset_transport_header(skb); 315cfdda9d7SSteve Wise } else { 316cfdda9d7SSteve Wise skb = alloc_skb(len, gfp); 317cfdda9d7SSteve Wise } 318cfdda9d7SSteve Wise return skb; 319cfdda9d7SSteve Wise } 320cfdda9d7SSteve Wise 321cfdda9d7SSteve Wise static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip, 322cfdda9d7SSteve Wise __be32 peer_ip, __be16 local_port, 323cfdda9d7SSteve Wise __be16 peer_port, u8 tos) 324cfdda9d7SSteve Wise { 325cfdda9d7SSteve Wise struct rtable *rt; 32631e4543dSDavid S. Miller struct flowi4 fl4; 327cfdda9d7SSteve Wise 32831e4543dSDavid S. Miller rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip, 32978fbfd8aSDavid S. Miller peer_port, local_port, IPPROTO_TCP, 33078fbfd8aSDavid S. Miller tos, 0); 331b23dd4feSDavid S. Miller if (IS_ERR(rt)) 332cfdda9d7SSteve Wise return NULL; 333cfdda9d7SSteve Wise return rt; 334cfdda9d7SSteve Wise } 335cfdda9d7SSteve Wise 336cfdda9d7SSteve Wise static void arp_failure_discard(void *handle, struct sk_buff *skb) 337cfdda9d7SSteve Wise { 338cfdda9d7SSteve Wise PDBG("%s c4iw_dev %p\n", __func__, handle); 339cfdda9d7SSteve Wise kfree_skb(skb); 340cfdda9d7SSteve Wise } 341cfdda9d7SSteve Wise 342cfdda9d7SSteve Wise /* 343cfdda9d7SSteve Wise * Handle an ARP failure for an active open. 
344cfdda9d7SSteve Wise */ 345cfdda9d7SSteve Wise static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) 346cfdda9d7SSteve Wise { 347cfdda9d7SSteve Wise printk(KERN_ERR MOD "ARP failure duing connect\n"); 348cfdda9d7SSteve Wise kfree_skb(skb); 349cfdda9d7SSteve Wise } 350cfdda9d7SSteve Wise 351cfdda9d7SSteve Wise /* 352cfdda9d7SSteve Wise * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant 353cfdda9d7SSteve Wise * and send it along. 354cfdda9d7SSteve Wise */ 355cfdda9d7SSteve Wise static void abort_arp_failure(void *handle, struct sk_buff *skb) 356cfdda9d7SSteve Wise { 357cfdda9d7SSteve Wise struct c4iw_rdev *rdev = handle; 358cfdda9d7SSteve Wise struct cpl_abort_req *req = cplhdr(skb); 359cfdda9d7SSteve Wise 360cfdda9d7SSteve Wise PDBG("%s rdev %p\n", __func__, rdev); 361cfdda9d7SSteve Wise req->cmd = CPL_ABORT_NO_RST; 362cfdda9d7SSteve Wise c4iw_ofld_send(rdev, skb); 363cfdda9d7SSteve Wise } 364cfdda9d7SSteve Wise 365cfdda9d7SSteve Wise static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) 366cfdda9d7SSteve Wise { 367cfdda9d7SSteve Wise unsigned int flowclen = 80; 368cfdda9d7SSteve Wise struct fw_flowc_wr *flowc; 369cfdda9d7SSteve Wise int i; 370cfdda9d7SSteve Wise 371cfdda9d7SSteve Wise skb = get_skb(skb, flowclen, GFP_KERNEL); 372cfdda9d7SSteve Wise flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen); 373cfdda9d7SSteve Wise 374cfdda9d7SSteve Wise flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) | 375cfdda9d7SSteve Wise FW_FLOWC_WR_NPARAMS(8)); 376cfdda9d7SSteve Wise flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen, 377cfdda9d7SSteve Wise 16)) | FW_WR_FLOWID(ep->hwtid)); 378cfdda9d7SSteve Wise 379cfdda9d7SSteve Wise flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 38094788657SSteve Wise flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8); 381cfdda9d7SSteve Wise flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 382cfdda9d7SSteve Wise 
flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); 383cfdda9d7SSteve Wise flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 384cfdda9d7SSteve Wise flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan); 385cfdda9d7SSteve Wise flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 386cfdda9d7SSteve Wise flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid); 387cfdda9d7SSteve Wise flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; 388cfdda9d7SSteve Wise flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq); 389cfdda9d7SSteve Wise flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; 390cfdda9d7SSteve Wise flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq); 391cfdda9d7SSteve Wise flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; 392cfdda9d7SSteve Wise flowc->mnemval[6].val = cpu_to_be32(snd_win); 393cfdda9d7SSteve Wise flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; 394cfdda9d7SSteve Wise flowc->mnemval[7].val = cpu_to_be32(ep->emss); 395cfdda9d7SSteve Wise /* Pad WR to 16 byte boundary */ 396cfdda9d7SSteve Wise flowc->mnemval[8].mnemonic = 0; 397cfdda9d7SSteve Wise flowc->mnemval[8].val = 0; 398cfdda9d7SSteve Wise for (i = 0; i < 9; i++) { 399cfdda9d7SSteve Wise flowc->mnemval[i].r4[0] = 0; 400cfdda9d7SSteve Wise flowc->mnemval[i].r4[1] = 0; 401cfdda9d7SSteve Wise flowc->mnemval[i].r4[2] = 0; 402cfdda9d7SSteve Wise } 403cfdda9d7SSteve Wise 404cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 405cfdda9d7SSteve Wise c4iw_ofld_send(&ep->com.dev->rdev, skb); 406cfdda9d7SSteve Wise } 407cfdda9d7SSteve Wise 408cfdda9d7SSteve Wise static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp) 409cfdda9d7SSteve Wise { 410cfdda9d7SSteve Wise struct cpl_close_con_req *req; 411cfdda9d7SSteve Wise struct sk_buff *skb; 412cfdda9d7SSteve Wise int wrlen = roundup(sizeof *req, 16); 413cfdda9d7SSteve Wise 414cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 415cfdda9d7SSteve Wise skb = get_skb(NULL, wrlen, gfp); 416cfdda9d7SSteve Wise if (!skb) { 417cfdda9d7SSteve Wise 
printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); 418cfdda9d7SSteve Wise return -ENOMEM; 419cfdda9d7SSteve Wise } 420cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 421cfdda9d7SSteve Wise t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 422cfdda9d7SSteve Wise req = (struct cpl_close_con_req *) skb_put(skb, wrlen); 423cfdda9d7SSteve Wise memset(req, 0, wrlen); 424cfdda9d7SSteve Wise INIT_TP_WR(req, ep->hwtid); 425cfdda9d7SSteve Wise OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, 426cfdda9d7SSteve Wise ep->hwtid)); 427cfdda9d7SSteve Wise return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 428cfdda9d7SSteve Wise } 429cfdda9d7SSteve Wise 430cfdda9d7SSteve Wise static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) 431cfdda9d7SSteve Wise { 432cfdda9d7SSteve Wise struct cpl_abort_req *req; 433cfdda9d7SSteve Wise int wrlen = roundup(sizeof *req, 16); 434cfdda9d7SSteve Wise 435cfdda9d7SSteve Wise PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 436cfdda9d7SSteve Wise skb = get_skb(skb, wrlen, gfp); 437cfdda9d7SSteve Wise if (!skb) { 438cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 439cfdda9d7SSteve Wise __func__); 440cfdda9d7SSteve Wise return -ENOMEM; 441cfdda9d7SSteve Wise } 442cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 443cfdda9d7SSteve Wise t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure); 444cfdda9d7SSteve Wise req = (struct cpl_abort_req *) skb_put(skb, wrlen); 445cfdda9d7SSteve Wise memset(req, 0, wrlen); 446cfdda9d7SSteve Wise INIT_TP_WR(req, ep->hwtid); 447cfdda9d7SSteve Wise OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); 448cfdda9d7SSteve Wise req->cmd = CPL_ABORT_SEND_RST; 449cfdda9d7SSteve Wise return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 450cfdda9d7SSteve Wise } 451cfdda9d7SSteve Wise 4525be78ee9SVipul Pandya #define VLAN_NONE 0xfff 4535be78ee9SVipul Pandya #define 
FILTER_SEL_VLAN_NONE 0xffff 4545be78ee9SVipul Pandya #define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */ 4555be78ee9SVipul Pandya #define FILTER_SEL_WIDTH_VIN_P_FC \ 4565be78ee9SVipul Pandya (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/ 4575be78ee9SVipul Pandya #define FILTER_SEL_WIDTH_TAG_P_FC \ 4585be78ee9SVipul Pandya (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */ 4595be78ee9SVipul Pandya #define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC) 4605be78ee9SVipul Pandya 4615be78ee9SVipul Pandya static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst, 4625be78ee9SVipul Pandya struct l2t_entry *l2t) 4635be78ee9SVipul Pandya { 4645be78ee9SVipul Pandya unsigned int ntuple = 0; 4655be78ee9SVipul Pandya u32 viid; 4665be78ee9SVipul Pandya 4675be78ee9SVipul Pandya switch (dev->rdev.lldi.filt_mode) { 4685be78ee9SVipul Pandya 4695be78ee9SVipul Pandya /* default filter mode */ 4705be78ee9SVipul Pandya case HW_TPL_FR_MT_PR_IV_P_FC: 4715be78ee9SVipul Pandya if (l2t->vlan == VLAN_NONE) 4725be78ee9SVipul Pandya ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC; 4735be78ee9SVipul Pandya else { 4745be78ee9SVipul Pandya ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC; 4755be78ee9SVipul Pandya ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC; 4765be78ee9SVipul Pandya } 4775be78ee9SVipul Pandya ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << 4785be78ee9SVipul Pandya FILTER_SEL_WIDTH_VLD_TAG_P_FC; 4795be78ee9SVipul Pandya break; 4805be78ee9SVipul Pandya case HW_TPL_FR_MT_PR_OV_P_FC: { 4815be78ee9SVipul Pandya viid = cxgb4_port_viid(l2t->neigh->dev); 4825be78ee9SVipul Pandya 4835be78ee9SVipul Pandya ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC; 4845be78ee9SVipul Pandya ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC; 4855be78ee9SVipul Pandya ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC; 4865be78ee9SVipul Pandya ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << 
4875be78ee9SVipul Pandya FILTER_SEL_WIDTH_VLD_TAG_P_FC; 4885be78ee9SVipul Pandya break; 4895be78ee9SVipul Pandya } 4905be78ee9SVipul Pandya default: 4915be78ee9SVipul Pandya break; 4925be78ee9SVipul Pandya } 4935be78ee9SVipul Pandya return ntuple; 4945be78ee9SVipul Pandya } 4955be78ee9SVipul Pandya 496cfdda9d7SSteve Wise static int send_connect(struct c4iw_ep *ep) 497cfdda9d7SSteve Wise { 498cfdda9d7SSteve Wise struct cpl_act_open_req *req; 499cfdda9d7SSteve Wise struct sk_buff *skb; 500cfdda9d7SSteve Wise u64 opt0; 501cfdda9d7SSteve Wise u32 opt2; 502cfdda9d7SSteve Wise unsigned int mtu_idx; 503cfdda9d7SSteve Wise int wscale; 504cfdda9d7SSteve Wise int wrlen = roundup(sizeof *req, 16); 505cfdda9d7SSteve Wise 506cfdda9d7SSteve Wise PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid); 507cfdda9d7SSteve Wise 508cfdda9d7SSteve Wise skb = get_skb(NULL, wrlen, GFP_KERNEL); 509cfdda9d7SSteve Wise if (!skb) { 510cfdda9d7SSteve Wise printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 511cfdda9d7SSteve Wise __func__); 512cfdda9d7SSteve Wise return -ENOMEM; 513cfdda9d7SSteve Wise } 514d4f1a5c6SSteve Wise set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 515cfdda9d7SSteve Wise 516cfdda9d7SSteve Wise cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 517cfdda9d7SSteve Wise wscale = compute_wscale(rcv_win); 5185be78ee9SVipul Pandya opt0 = (nocong ? 
NO_CONG(1) : 0) | 5195be78ee9SVipul Pandya KEEP_ALIVE(1) | 520ba6d3925SSteve Wise DELACK(1) | 521cfdda9d7SSteve Wise WND_SCALE(wscale) | 522cfdda9d7SSteve Wise MSS_IDX(mtu_idx) | 523cfdda9d7SSteve Wise L2T_IDX(ep->l2t->idx) | 524cfdda9d7SSteve Wise TX_CHAN(ep->tx_chan) | 525cfdda9d7SSteve Wise SMAC_SEL(ep->smac_idx) | 526cfdda9d7SSteve Wise DSCP(ep->tos) | 527b48f3b9cSSteve Wise ULP_MODE(ULP_MODE_TCPDDP) | 528cfdda9d7SSteve Wise RCV_BUFSIZ(rcv_win>>10); 529cfdda9d7SSteve Wise opt2 = RX_CHANNEL(0) | 5305be78ee9SVipul Pandya CCTRL_ECN(enable_ecn) | 531cfdda9d7SSteve Wise RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 532cfdda9d7SSteve Wise if (enable_tcp_timestamps) 533cfdda9d7SSteve Wise opt2 |= TSTAMPS_EN(1); 534cfdda9d7SSteve Wise if (enable_tcp_sack) 535cfdda9d7SSteve Wise opt2 |= SACK_EN(1); 536cfdda9d7SSteve Wise if (wscale && enable_tcp_window_scaling) 537cfdda9d7SSteve Wise opt2 |= WND_SCALE_EN(1); 538cfdda9d7SSteve Wise t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); 539cfdda9d7SSteve Wise 540cfdda9d7SSteve Wise req = (struct cpl_act_open_req *) skb_put(skb, wrlen); 541cfdda9d7SSteve Wise INIT_TP_WR(req, 0); 542cfdda9d7SSteve Wise OPCODE_TID(req) = cpu_to_be32( 543cfdda9d7SSteve Wise MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid))); 544cfdda9d7SSteve Wise req->local_port = ep->com.local_addr.sin_port; 545cfdda9d7SSteve Wise req->peer_port = ep->com.remote_addr.sin_port; 546cfdda9d7SSteve Wise req->local_ip = ep->com.local_addr.sin_addr.s_addr; 547cfdda9d7SSteve Wise req->peer_ip = ep->com.remote_addr.sin_addr.s_addr; 548cfdda9d7SSteve Wise req->opt0 = cpu_to_be64(opt0); 5495be78ee9SVipul Pandya req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t)); 550cfdda9d7SSteve Wise req->opt2 = cpu_to_be32(opt2); 551cfdda9d7SSteve Wise return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 552cfdda9d7SSteve Wise } 553cfdda9d7SSteve Wise 554d2fe99e8SKumar Sanghvi static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, 
555d2fe99e8SKumar Sanghvi u8 mpa_rev_to_use) 556cfdda9d7SSteve Wise { 557cfdda9d7SSteve Wise int mpalen, wrlen; 558cfdda9d7SSteve Wise struct fw_ofld_tx_data_wr *req; 559cfdda9d7SSteve Wise struct mpa_message *mpa; 560d2fe99e8SKumar Sanghvi struct mpa_v2_conn_params mpa_v2_params; 561cfdda9d7SSteve Wise 562cfdda9d7SSteve Wise PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 563cfdda9d7SSteve Wise 564cfdda9d7SSteve Wise BUG_ON(skb_cloned(skb)); 565cfdda9d7SSteve Wise 566cfdda9d7SSteve Wise mpalen = sizeof(*mpa) + ep->plen; 567d2fe99e8SKumar Sanghvi if (mpa_rev_to_use == 2) 568d2fe99e8SKumar Sanghvi mpalen += sizeof(struct mpa_v2_conn_params); 569cfdda9d7SSteve Wise wrlen = roundup(mpalen + sizeof *req, 16); 570cfdda9d7SSteve Wise skb = get_skb(skb, wrlen, GFP_KERNEL); 571cfdda9d7SSteve Wise if (!skb) { 572cfdda9d7SSteve Wise connect_reply_upcall(ep, -ENOMEM); 573cfdda9d7SSteve Wise return; 574cfdda9d7SSteve Wise } 575cfdda9d7SSteve Wise set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 576cfdda9d7SSteve Wise 577cfdda9d7SSteve Wise req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); 578cfdda9d7SSteve Wise memset(req, 0, wrlen); 579cfdda9d7SSteve Wise req->op_to_immdlen = cpu_to_be32( 580cfdda9d7SSteve Wise FW_WR_OP(FW_OFLD_TX_DATA_WR) | 581cfdda9d7SSteve Wise FW_WR_COMPL(1) | 582cfdda9d7SSteve Wise FW_WR_IMMDLEN(mpalen)); 583cfdda9d7SSteve Wise req->flowid_len16 = cpu_to_be32( 584cfdda9d7SSteve Wise FW_WR_FLOWID(ep->hwtid) | 585cfdda9d7SSteve Wise FW_WR_LEN16(wrlen >> 4)); 586cfdda9d7SSteve Wise req->plen = cpu_to_be32(mpalen); 587cfdda9d7SSteve Wise req->tunnel_to_proxy = cpu_to_be32( 588cfdda9d7SSteve Wise FW_OFLD_TX_DATA_WR_FLUSH(1) | 589cfdda9d7SSteve Wise FW_OFLD_TX_DATA_WR_SHOVE(1)); 590cfdda9d7SSteve Wise 591cfdda9d7SSteve Wise mpa = (struct mpa_message *)(req + 1); 592cfdda9d7SSteve Wise memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); 593cfdda9d7SSteve Wise mpa->flags = (crc_enabled ? 
MPA_CRC : 0) | 594d2fe99e8SKumar Sanghvi (markers_enabled ? MPA_MARKERS : 0) | 595d2fe99e8SKumar Sanghvi (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0); 596cfdda9d7SSteve Wise mpa->private_data_size = htons(ep->plen); 597d2fe99e8SKumar Sanghvi mpa->revision = mpa_rev_to_use; 59801b225e1SKumar Sanghvi if (mpa_rev_to_use == 1) { 599d2fe99e8SKumar Sanghvi ep->tried_with_mpa_v1 = 1; 60001b225e1SKumar Sanghvi ep->retry_with_mpa_v1 = 0; 60101b225e1SKumar Sanghvi } 602d2fe99e8SKumar Sanghvi 603d2fe99e8SKumar Sanghvi if (mpa_rev_to_use == 2) { 604f747c34aSRoland Dreier mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 605f747c34aSRoland Dreier sizeof (struct mpa_v2_conn_params)); 606d2fe99e8SKumar Sanghvi mpa_v2_params.ird = htons((u16)ep->ird); 607d2fe99e8SKumar Sanghvi mpa_v2_params.ord = htons((u16)ep->ord); 608d2fe99e8SKumar Sanghvi 609d2fe99e8SKumar Sanghvi if (peer2peer) { 610d2fe99e8SKumar Sanghvi mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 611d2fe99e8SKumar Sanghvi if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) 612d2fe99e8SKumar Sanghvi mpa_v2_params.ord |= 613d2fe99e8SKumar Sanghvi htons(MPA_V2_RDMA_WRITE_RTR); 614d2fe99e8SKumar Sanghvi else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) 615d2fe99e8SKumar Sanghvi mpa_v2_params.ord |= 616d2fe99e8SKumar Sanghvi htons(MPA_V2_RDMA_READ_RTR); 617d2fe99e8SKumar Sanghvi } 618d2fe99e8SKumar Sanghvi memcpy(mpa->private_data, &mpa_v2_params, 619d2fe99e8SKumar Sanghvi sizeof(struct mpa_v2_conn_params)); 620cfdda9d7SSteve Wise 621cfdda9d7SSteve Wise if (ep->plen) 622d2fe99e8SKumar Sanghvi memcpy(mpa->private_data + 623d2fe99e8SKumar Sanghvi sizeof(struct mpa_v2_conn_params), 624d2fe99e8SKumar Sanghvi ep->mpa_pkt + sizeof(*mpa), ep->plen); 625d2fe99e8SKumar Sanghvi } else 626d2fe99e8SKumar Sanghvi if (ep->plen) 627d2fe99e8SKumar Sanghvi memcpy(mpa->private_data, 628d2fe99e8SKumar Sanghvi ep->mpa_pkt + sizeof(*mpa), ep->plen); 629cfdda9d7SSteve Wise 630cfdda9d7SSteve Wise /* 631cfdda9d7SSteve Wise * 
Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	/* Only one MPA frame may be outstanding per endpoint. */
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	/* Bound the wait for the peer's MPA reply with the ep timer. */
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	return;
}

/*
 * Send an MPA start-response with the MPA_REJECT flag set, carrying up to
 * plen bytes of caller-supplied private data, as an immediate-data
 * FW_OFLD_TX_DATA_WR on the endpoint's offload TX queue.
 *
 * When the (already negotiated) MPA version is 2 with the enhanced RDMA
 * connection option, an mpa_v2_conn_params block (IRD/ORD and peer-to-peer
 * RTR bits) is inserted between the MPA header and the private data, and
 * the advertised private_data_size is grown accordingly.
 *
 * Returns 0 on successful submission to the L2T send path, or -ENOMEM if
 * no skb could be allocated.
 */
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	/* Wire length: WR header + MPA message (+ v2 params), 16B aligned. */
	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	/* MPA start-response header immediately follows the WR header. */
	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		/* v2 params are counted as part of the private data. */
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	/* Only one MPA frame may be outstanding per endpoint. */
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

/*
 * Send the (accepting) MPA start-response with up to plen bytes of private
 * data, as an immediate-data FW_OFLD_TX_DATA_WR, and move the endpoint to
 * MPA_REP_SENT. CRC/marker flags reflect what was negotiated into
 * ep->mpa_attr; v2 enhanced-RDMA connections additionally carry an
 * mpa_v2_conn_params block. Returns 0 on submission or -ENOMEM.
 */
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	/* Wire length: WR header + MPA message (+ v2 params), 16B aligned. */
	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req,
16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	/* MPA start-response header immediately follows the WR header. */
	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	/* CRC reflects the negotiated attribute; markers the module param. */
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		/* v2 params are counted as part of the private data. */
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
					FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	/*
	 * NOTE(review): unlike send_mpa_req()/send_mpa_reject() there is no
	 * BUG_ON(ep->mpa_skb) here -- presumably mpa_skb is always NULL on
	 * the accept path; confirm against the callers.
	 */
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

/*
 * CPL_ACT_ESTABLISH handler: the hardware has completed the active-open
 * TCP handshake. Bind the hardware tid to the endpoint (releasing the
 * atid used during connection setup), record the initial send/receive
 * sequence numbers, derive the effective MSS from the TCP options, and
 * kick off MPA negotiation by sending the flowc WR followed by the MPA
 * request (forcing MPA v1 when a previous v2 attempt is being retried).
 * Always returns 0.
 */
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb4_free_atid(t, atid);

	/* start MPA negotiation */
	send_flowc(ep, NULL);
	if (ep->retry_with_mpa_v1)
		send_mpa_req(ep, skb, 1);
	else
		send_mpa_req(ep, skb, mpa_rev);

	return 0;
}

/*
 * Deliver IW_CM_EVENT_CLOSE to the consumer, then drop the cm_id
 * reference and sever the cm_id/qp linkage so no further upcalls occur
 * on this endpoint. No-op if the cm_id is already gone.
 */
static void close_complete_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

/*
 * Tear the connection down: report close completion to the consumer,
 * mark the endpoint ABORTING, and issue the abort WR. Returns the
 * result of send_abort().
 */
static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	close_complete_upcall(ep);
	state_set(&ep->com, ABORTING);
	return send_abort(ep, skb, gfp);
}

/*
 * Deliver IW_CM_EVENT_DISCONNECT to the consumer when the peer closes.
 * The cm_id reference is kept (unlike close/abort) since the connection
 * is not fully down yet.
 */
static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

/*
 * Deliver IW_CM_EVENT_CLOSE with status -ECONNRESET after a peer abort,
 * then drop the cm_id reference and sever the cm_id/qp linkage.
 */
static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

/*
 * Deliver IW_CM_EVENT_CONNECT_REPLY (active-side connect outcome) to the
 * consumer. On success or -ECONNREFUSED the peer's private data is passed
 * up from the accumulated MPA packet; for an MPA v2 exchange the
 * mpa_v2_conn_params block is skipped so the consumer sees only its own
 * private data. On any negative status the cm_id reference is dropped and
 * the cm_id/qp linkage severed.
 */
static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

/*
 * Deliver IW_CM_EVENT_CONNECT_REQUEST to the listening endpoint's
 * consumer for an incoming connection. For MPA v2 the negotiated IRD/ORD
 * and v2-adjusted private data are reported; for MPA v1 the maximum
 * supported read depth is advertised instead. A reference is taken on
 * the child ep only if the upcall is actually delivered (parent not
 * DEAD); in all cases the parent reference is dropped and the
 * parent link cleared, making this a one-shot upcall.
 */
static void connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = c4iw_max_read_depth;
		event.ird = c4iw_max_read_depth;
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (state_read(&ep->parent_ep->com) != DEAD) {
		c4iw_get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	}
	c4iw_put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

/*
 * Deliver IW_CM_EVENT_ESTABLISHED (with the final IRD/ORD values) to the
 * consumer once the connection reaches FPDU mode. No-op if the cm_id is
 * already gone.
 */
static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ird;
	event.ord = ep->ord;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

/*
 * Return RX credits to the hardware via a CPL_RX_DATA_ACK on the control
 * queue, forcing an ack and (re)programming the delayed-ack mode from the
 * dack_mode module parameter. Returns the number of credits returned, or
 * 0 if the skb allocation failed (the credits are simply not returned in
 * that case).
 */
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
				       F_RX_DACK_CHANGE |
				       V_RX_DACK_MODE(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}

/*
 * Process incoming MPA start-response data on the active (initiator)
 * side. Accumulates stream data into ep->mpa_pkt until a complete,
 * valid MPA reply (including private data) has arrived, then negotiates
 * CRC/marker/p2p attributes (with MPA v2 IRD/ORD handling), moves the QP
 * to RTS, and finally issues the connect-reply upcall. Errors abort the
 * connection and report the failure upstream.
 */
static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer. If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	/*
	 * NOTE(review): '|' binds tighter than '?:', so this evaluates as
	 * ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0 -- i.e. CRC is
	 * enabled if EITHER the peer requested it OR the local module
	 * parameter is set. Confirm this "either" semantic is intended.
	 */
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			/* v2 params sit right after the MPA header. */
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
				insuff_ird = 1;
			}

			if (ntohs(mpa_v2_params->ird) &
					MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
						MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
						MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
	     "%d\n", __func__, ep->mpa_attr.crc_enabled,
	     ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
			(ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
/*
 * process_mpa_request - accumulate and parse an incoming MPA start request.
 *
 * Streaming-mode RX payloads are appended to ep->mpa_pkt until a complete
 * MPA request (header plus private data) has arrived.  The header is then
 * validated, the negotiated MPA attributes (CRC, markers, version, and for
 * MPA v2 the IRD/ORD and peer2peer RTR type) are recorded on the endpoint,
 * and the connection request is delivered to the ULP via
 * connect_request_upcall().  Only runs while the endpoint is in
 * MPA_REQ_WAIT; any protocol violation aborts the connection.
 */
static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	/* Full header present: stop the MPA-request timer before parsing. */
	stop_ep_timer(ep);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	/*
	 * NOTE(review): '|' binds tighter than '?:', so this evaluates as
	 * ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0 - i.e. CRC is on
	 * if the peer requested it OR the module parameter enables it.
	 * Confirm the bitwise-or of a flag bit with a bool is intended.
	 */
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		/* MPA v2: optional enhanced-RDMA params follow the header. */
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			ep->ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			/* Select the RTR type only if both sides allow p2p. */
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
				if (peer2peer) {
					if (ntohs(mpa_v2_params->ord) &
					    MPA_V2_RDMA_WRITE_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
					else if (ntohs(mpa_v2_params->ord) &
						 MPA_V2_RDMA_READ_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
				}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}

/*
 * rx_data - handle CPL_RX_DATA: streaming-mode payload from the hardware.
 *
 * Advances the receive sequence, refills hardware RX credits, and feeds the
 * payload to the MPA reply/request parser according to the endpoint state.
 * Data arriving in any other state is logged and dropped; the endpoint
 * timer will eventually fail the connection (see ep_timeout()).
 */
static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	/*
	 * NOTE(review): unlike abort_rpl() below, ep is not checked for
	 * NULL after lookup_tid(); data for a freed endpoint would oops.
	 * Verify the tid cannot be released while streaming data is
	 * in flight, or add the same guard.
	 */
	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	/* Strip the CPL header and trim to the advertised payload length. */
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %u\n",
		       __func__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}
	return 0;
}

/*
 * abort_rpl - handle CPL_ABORT_RPL_RSS: firmware ack of our abort request.
 *
 * Under the endpoint mutex, an endpoint in ABORTING moves to DEAD and its
 * resources are released; an ack in any other state is only logged.
 * Tolerates a reply that races with endpoint teardown (NULL lookup).
 */
static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	if (!ep) {
		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
		return 0;
	}
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case ABORTING:
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release)
		release_ep_resources(ep);
	return 0;
}
int atid) 14415be78ee9SVipul Pandya { 14425be78ee9SVipul Pandya struct sk_buff *skb; 14435be78ee9SVipul Pandya struct fw_ofld_connection_wr *req; 14445be78ee9SVipul Pandya unsigned int mtu_idx; 14455be78ee9SVipul Pandya int wscale; 14465be78ee9SVipul Pandya 14475be78ee9SVipul Pandya skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 14485be78ee9SVipul Pandya req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req)); 14495be78ee9SVipul Pandya memset(req, 0, sizeof(*req)); 14505be78ee9SVipul Pandya req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR)); 14515be78ee9SVipul Pandya req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); 14525be78ee9SVipul Pandya req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, 14535be78ee9SVipul Pandya ep->l2t)); 14545be78ee9SVipul Pandya req->le.lport = ep->com.local_addr.sin_port; 14555be78ee9SVipul Pandya req->le.pport = ep->com.remote_addr.sin_port; 14565be78ee9SVipul Pandya req->le.u.ipv4.lip = ep->com.local_addr.sin_addr.s_addr; 14575be78ee9SVipul Pandya req->le.u.ipv4.pip = ep->com.remote_addr.sin_addr.s_addr; 14585be78ee9SVipul Pandya req->tcb.t_state_to_astid = 14595be78ee9SVipul Pandya htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) | 14605be78ee9SVipul Pandya V_FW_OFLD_CONNECTION_WR_ASTID(atid)); 14615be78ee9SVipul Pandya req->tcb.cplrxdataack_cplpassacceptrpl = 14625be78ee9SVipul Pandya htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK); 14635be78ee9SVipul Pandya req->tcb.tx_max = jiffies; 14645be78ee9SVipul Pandya cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 14655be78ee9SVipul Pandya wscale = compute_wscale(rcv_win); 14665be78ee9SVipul Pandya req->tcb.opt0 = TCAM_BYPASS(1) | 14675be78ee9SVipul Pandya (nocong ? 
NO_CONG(1) : 0) | 14685be78ee9SVipul Pandya KEEP_ALIVE(1) | 14695be78ee9SVipul Pandya DELACK(1) | 14705be78ee9SVipul Pandya WND_SCALE(wscale) | 14715be78ee9SVipul Pandya MSS_IDX(mtu_idx) | 14725be78ee9SVipul Pandya L2T_IDX(ep->l2t->idx) | 14735be78ee9SVipul Pandya TX_CHAN(ep->tx_chan) | 14745be78ee9SVipul Pandya SMAC_SEL(ep->smac_idx) | 14755be78ee9SVipul Pandya DSCP(ep->tos) | 14765be78ee9SVipul Pandya ULP_MODE(ULP_MODE_TCPDDP) | 14775be78ee9SVipul Pandya RCV_BUFSIZ(rcv_win >> 10); 14785be78ee9SVipul Pandya req->tcb.opt2 = PACE(1) | 14795be78ee9SVipul Pandya TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) | 14805be78ee9SVipul Pandya RX_CHANNEL(0) | 14815be78ee9SVipul Pandya CCTRL_ECN(enable_ecn) | 14825be78ee9SVipul Pandya RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 14835be78ee9SVipul Pandya if (enable_tcp_timestamps) 14845be78ee9SVipul Pandya req->tcb.opt2 |= TSTAMPS_EN(1); 14855be78ee9SVipul Pandya if (enable_tcp_sack) 14865be78ee9SVipul Pandya req->tcb.opt2 |= SACK_EN(1); 14875be78ee9SVipul Pandya if (wscale && enable_tcp_window_scaling) 14885be78ee9SVipul Pandya req->tcb.opt2 |= WND_SCALE_EN(1); 14895be78ee9SVipul Pandya req->tcb.opt0 = cpu_to_be64(req->tcb.opt0); 14905be78ee9SVipul Pandya req->tcb.opt2 = cpu_to_be32(req->tcb.opt2); 14915be78ee9SVipul Pandya set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); 14925be78ee9SVipul Pandya c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 14935be78ee9SVipul Pandya } 14945be78ee9SVipul Pandya 1495cfdda9d7SSteve Wise /* 1496cfdda9d7SSteve Wise * Return whether a failed active open has allocated a TID 1497cfdda9d7SSteve Wise */ 1498cfdda9d7SSteve Wise static inline int act_open_has_tid(int status) 1499cfdda9d7SSteve Wise { 1500cfdda9d7SSteve Wise return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && 1501cfdda9d7SSteve Wise status != CPL_ERR_ARP_MISS; 1502cfdda9d7SSteve Wise } 1503cfdda9d7SSteve Wise 1504cfdda9d7SSteve Wise static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1505cfdda9d7SSteve 
Wise { 1506cfdda9d7SSteve Wise struct c4iw_ep *ep; 1507cfdda9d7SSteve Wise struct cpl_act_open_rpl *rpl = cplhdr(skb); 1508cfdda9d7SSteve Wise unsigned int atid = GET_TID_TID(GET_AOPEN_ATID( 1509cfdda9d7SSteve Wise ntohl(rpl->atid_status))); 1510cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 1511cfdda9d7SSteve Wise int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status)); 1512cfdda9d7SSteve Wise 1513cfdda9d7SSteve Wise ep = lookup_atid(t, atid); 1514cfdda9d7SSteve Wise 1515cfdda9d7SSteve Wise PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, 1516cfdda9d7SSteve Wise status, status2errno(status)); 1517cfdda9d7SSteve Wise 1518cfdda9d7SSteve Wise if (status == CPL_ERR_RTX_NEG_ADVICE) { 1519cfdda9d7SSteve Wise printk(KERN_WARNING MOD "Connection problems for atid %u\n", 1520cfdda9d7SSteve Wise atid); 1521cfdda9d7SSteve Wise return 0; 1522cfdda9d7SSteve Wise } 1523cfdda9d7SSteve Wise 1524d716a2a0SVipul Pandya /* 1525d716a2a0SVipul Pandya * Log interesting failures. 
1526d716a2a0SVipul Pandya */ 1527d716a2a0SVipul Pandya switch (status) { 1528d716a2a0SVipul Pandya case CPL_ERR_CONN_RESET: 1529d716a2a0SVipul Pandya case CPL_ERR_CONN_TIMEDOUT: 1530d716a2a0SVipul Pandya break; 15315be78ee9SVipul Pandya case CPL_ERR_TCAM_FULL: 15325be78ee9SVipul Pandya mutex_lock(&dev->rdev.stats.lock); 15335be78ee9SVipul Pandya dev->rdev.stats.tcam_full++; 15345be78ee9SVipul Pandya mutex_unlock(&dev->rdev.stats.lock); 15355be78ee9SVipul Pandya send_fw_act_open_req(ep, 15365be78ee9SVipul Pandya GET_TID_TID(GET_AOPEN_ATID(ntohl(rpl->atid_status)))); 15375be78ee9SVipul Pandya return 0; 15385be78ee9SVipul Pandya break; 1539d716a2a0SVipul Pandya default: 1540d716a2a0SVipul Pandya printk(KERN_INFO MOD "Active open failure - " 1541d716a2a0SVipul Pandya "atid %u status %u errno %d %pI4:%u->%pI4:%u\n", 1542d716a2a0SVipul Pandya atid, status, status2errno(status), 1543d716a2a0SVipul Pandya &ep->com.local_addr.sin_addr.s_addr, 1544d716a2a0SVipul Pandya ntohs(ep->com.local_addr.sin_port), 1545d716a2a0SVipul Pandya &ep->com.remote_addr.sin_addr.s_addr, 1546d716a2a0SVipul Pandya ntohs(ep->com.remote_addr.sin_port)); 1547d716a2a0SVipul Pandya break; 1548d716a2a0SVipul Pandya } 1549d716a2a0SVipul Pandya 1550cfdda9d7SSteve Wise connect_reply_upcall(ep, status2errno(status)); 1551cfdda9d7SSteve Wise state_set(&ep->com, DEAD); 1552cfdda9d7SSteve Wise 1553cfdda9d7SSteve Wise if (status && act_open_has_tid(status)) 1554cfdda9d7SSteve Wise cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); 1555cfdda9d7SSteve Wise 1556cfdda9d7SSteve Wise cxgb4_free_atid(t, atid); 1557cfdda9d7SSteve Wise dst_release(ep->dst); 1558cfdda9d7SSteve Wise cxgb4_l2t_release(ep->l2t); 1559cfdda9d7SSteve Wise c4iw_put_ep(&ep->com); 1560cfdda9d7SSteve Wise 1561cfdda9d7SSteve Wise return 0; 1562cfdda9d7SSteve Wise } 1563cfdda9d7SSteve Wise 1564cfdda9d7SSteve Wise static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1565cfdda9d7SSteve Wise { 1566cfdda9d7SSteve Wise struct 
/*
 * pass_open_rpl - handle CPL_PASS_OPEN_RPL: result of starting a listener.
 *
 * Wakes the thread blocked in listen setup with the (errno-converted)
 * firmware status.  A reply for an unknown stid is logged and dropped.
 */
static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	if (!ep) {
		printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
		return 0;
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));

	return 0;
}

/*
 * listen_stop - issue a CPL_CLOSE_LISTSRV_REQ to shut down a listener.
 *
 * Builds and sends the close request for ep->stid on the setup-priority
 * queue.  Returns 0 on successful submit or -ENOMEM if no skb could be
 * allocated; completion arrives later via close_listsrv_rpl().
 */
static int listen_stop(struct c4iw_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listsvr_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
						    ep->stid));
	/* Direct the reply to ingress queue 0. */
	req->reply_ctrl = cpu_to_be16(
			  QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

/*
 * close_listsrv_rpl - handle CPL_CLOSE_LISTSRV_RPL: listener close done.
 *
 * Wakes the thread blocked in listen teardown with the converted status.
 * NOTE(review): unlike pass_open_rpl() above, the lookup_stid() result is
 * not NULL-checked before use - confirm the stid cannot be freed first.
 */
static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	PDBG("%s ep %p\n", __func__, ep);
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
	return 0;
}

/*
 * accept_cr - send CPL_PASS_ACCEPT_RPL to accept an incoming connection.
 *
 * Reuses the PASS_ACCEPT_REQ skb (trimmed and reference-taken) to carry the
 * reply.  opt0/opt2 encode the TCB parameters: MTU index, window scale,
 * L2T/channel/SMAC selection, DDP ULP mode and RSS queue, with TCP options
 * (timestamps, SACK, window scaling) enabled only when both the module
 * parameters and the peer's SYN options allow them.  ECN is turned on only
 * if the peer's SYN had both ECE and CWR set.
 */
static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
		      struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	int wscale;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = (nocong ? NO_CONG(1) : 0) |
	       KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos >> 2) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	if (enable_ecn) {
		const struct tcphdr *tcph;
		u32 hlen = ntohl(req->hdr_len);

		/* Locate the TCP header inside the CPL-embedded packet. */
		tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
			G_IP_HDR_LEN(hlen);
		if (tcph->ece && tcph->cwr)
			opt2 |= CCTRL_ECN(1);
	}

	rpl = cplhdr(skb);
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));
	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);

	return;
}
/*
 * reject_cr - refuse an incoming connection by releasing its hardware tid.
 *
 * Reuses the request skb (trimmed to a cpl_tid_release and reference-taken)
 * to carry the release message; no reply is sent to the peer here.
 */
static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);
	release_tid(&dev->rdev, hwtid, skb);
	return;
}

/*
 * get_4tuple - extract the IPv4/TCP 4-tuple from a CPL_PASS_ACCEPT_REQ.
 *
 * The original SYN's Ethernet/IP/TCP headers follow the CPL header; their
 * lengths are encoded in req->hdr_len.  All outputs are returned in
 * network byte order.
 */
static void get_4tuple(struct cpl_pass_accept_req *req,
		       __be32 *local_ip, __be32 *peer_ip,
		       __be16 *local_port, __be16 *peer_port)
{
	int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
	int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			     ((u8 *)(req + 1) + eth_len + ip_len);

	PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
	     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
	     ntohs(tcp->dest));

	*peer_ip = ip->saddr;
	*local_ip = ip->daddr;
	*peer_port = tcp->source;
	*local_port = tcp->dest;

	return;
}

/*
 * import_ep - populate an endpoint's transmit path from a route.
 *
 * Resolves the neighbour for @peer_ip on @dst, allocates an L2T entry, and
 * fills in the per-endpoint hardware parameters (MTU, tx channel, SMAC
 * index, tx/ctrl queue indices and RSS queue).  Loopback destinations are
 * resolved through ip_dev_find() on the matching local device; all others
 * use the neighbour's device directly.  When @clear_mpa_v1 is set, the
 * MPA-v1 retry flags are reset (fresh connection attempt).
 *
 * Returns 0 on success, -ENODEV if no neighbour/device is found, or
 * -ENOMEM if no L2T entry could be allocated.  The neighbour reference is
 * always dropped before return; n->dev is read under rcu_read_lock().
 */
static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
		     struct c4iw_dev *cdev, bool clear_mpa_v1)
{
	struct neighbour *n;
	int err, step;

	n = dst_neigh_lookup(dst, &peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	err = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		struct net_device *pdev;

		pdev = ip_dev_find(&init_net, peer_ip);
		if (!pdev) {
			err = -ENODEV;
			goto out;
		}
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		/* Spread tx and rx queues evenly across channels. */
		step = cdev->rdev.lldi.ntxq /
		       cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = cdev->rdev.lldi.nrxq /
		       cdev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(pdev) * step];
		dev_put(pdev);
	} else {
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, n->dev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = dst_mtu(dst);
		ep->tx_chan = cxgb4_port_chan(n->dev);
		ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
		step = cdev->rdev.lldi.ntxq /
		       cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(n->dev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(n->dev);
		step = cdev->rdev.lldi.nrxq /
		       cdev->rdev.lldi.nchan;
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(n->dev) * step];

		if (clear_mpa_v1) {
			ep->retry_with_mpa_v1 = 0;
			ep->tried_with_mpa_v1 = 0;
		}
	}
	err = 0;
out:
	rcu_read_unlock();

	neigh_release(n);

	return err;
}
/*
 * pass_accept_req - handle CPL_PASS_ACCEPT_REQ: incoming SYN on a listener.
 *
 * Extracts the 4-tuple from the embedded packet headers, finds a route
 * back to the peer, allocates and populates a child endpoint (route, L2T,
 * addresses, tos), inserts its hardware tid, and accepts the connection
 * via accept_cr().  Any failure releases whatever was acquired and rejects
 * by releasing the tid (reject_cr()).
 *
 * Ownership on success: child_ep holds the dst reference, a reference on
 * the parent listening endpoint, and the hwtid mapping.
 *
 * NOTE(review): parent_ep from lookup_stid() is dereferenced without a
 * NULL check - confirm a PASS_ACCEPT_REQ cannot race listener teardown.
 */
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *child_ep, *parent_ep;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct rtable *rt;
	__be32 local_ip, peer_ip;
	__be16 local_port, peer_port;
	int err;

	parent_ep = lookup_stid(t, stid);
	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

	get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	/* Find output route */
	rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
			GET_POPEN_TOS(ntohl(req->tos_stid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	dst = &rt->dst;

	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}

	err = import_ep(child_ep, peer_ip, dst, dev, false);
	if (err) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		kfree(child_ep);
		goto reject;
	}

	state_set(&child_ep->com, CONNECTING);
	child_ep->com.dev = dev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = local_port;
	child_ep->com.local_addr.sin_addr.s_addr = local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
	c4iw_get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;

	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
	     child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);

	init_timer(&child_ep->timer);
	cxgb4_insert_tid(t, child_ep, hwtid);
	accept_cr(child_ep, peer_ip, skb, req);
	goto out;
reject:
	reject_cr(dev, hwtid, peer_ip, skb);
out:
	return 0;
}

/*
 * pass_establish - handle CPL_PASS_ESTABLISH: passive connection is up.
 *
 * Records the negotiated send/receive ISNs and emss, confirms the route,
 * moves the endpoint to MPA_REQ_WAIT (the peer must now send the MPA start
 * request), arms the MPA timer, and sends the flowc work request.
 */
static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);
	send_flowc(ep, skb);

	return 0;
}
1905cfdda9d7SSteve Wise */ 1906cfdda9d7SSteve Wise __state_set(&ep->com, CLOSING); 1907cfdda9d7SSteve Wise PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 1908d9594d99SSteve Wise c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 1909cfdda9d7SSteve Wise break; 1910cfdda9d7SSteve Wise case MPA_REP_SENT: 1911cfdda9d7SSteve Wise __state_set(&ep->com, CLOSING); 1912cfdda9d7SSteve Wise PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 1913d9594d99SSteve Wise c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 1914cfdda9d7SSteve Wise break; 1915cfdda9d7SSteve Wise case FPDU_MODE: 1916ca5a2202SSteve Wise start_ep_timer(ep); 1917cfdda9d7SSteve Wise __state_set(&ep->com, CLOSING); 191830c95c2dSSteve Wise attrs.next_state = C4IW_QP_STATE_CLOSING; 19198da7e7a5SSteve Wise ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 192030c95c2dSSteve Wise C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 19218da7e7a5SSteve Wise if (ret != -ECONNRESET) { 1922cfdda9d7SSteve Wise peer_close_upcall(ep); 192330c95c2dSSteve Wise disconnect = 1; 19248da7e7a5SSteve Wise } 1925cfdda9d7SSteve Wise break; 1926cfdda9d7SSteve Wise case ABORTING: 1927cfdda9d7SSteve Wise disconnect = 0; 1928cfdda9d7SSteve Wise break; 1929cfdda9d7SSteve Wise case CLOSING: 1930cfdda9d7SSteve Wise __state_set(&ep->com, MORIBUND); 1931cfdda9d7SSteve Wise disconnect = 0; 1932cfdda9d7SSteve Wise break; 1933cfdda9d7SSteve Wise case MORIBUND: 1934ca5a2202SSteve Wise stop_ep_timer(ep); 1935cfdda9d7SSteve Wise if (ep->com.cm_id && ep->com.qp) { 1936cfdda9d7SSteve Wise attrs.next_state = C4IW_QP_STATE_IDLE; 1937cfdda9d7SSteve Wise c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1938cfdda9d7SSteve Wise C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1939cfdda9d7SSteve Wise } 1940cfdda9d7SSteve Wise close_complete_upcall(ep); 1941cfdda9d7SSteve Wise __state_set(&ep->com, DEAD); 1942cfdda9d7SSteve Wise release = 1; 1943cfdda9d7SSteve Wise disconnect = 0; 1944cfdda9d7SSteve Wise break; 1945cfdda9d7SSteve Wise case DEAD: 1946cfdda9d7SSteve Wise disconnect = 0; 
/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 * Negative advice aborts are hints from TP (e.g. retransmit timeouts)
 * and must not tear down the connection.
 */
static int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}

/*
 * Re-initiate an active connection on an existing endpoint, used to retry
 * with MPA v1 after an MPA v2 attempt was aborted by the peer (see
 * peer_abort()).  The ULP is NOT told about the retry; only if the retry
 * itself fails do we deliver the -ECONNRESET it is still waiting for.
 *
 * Returns 0 on success or a negative errno; on failure the ep reference
 * is dropped after notifying the ULP.
 */
static int c4iw_reconnect(struct c4iw_ep *ep)
{
	struct rtable *rt;
	int err = 0;

	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
	init_timer(&ep->timer);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	/* find a route */
	rt = find_route(ep->com.dev,
			ep->com.cm_id->local_addr.sin_addr.s_addr,
			ep->com.cm_id->remote_addr.sin_addr.s_addr,
			ep->com.cm_id->local_addr.sin_port,
			ep->com.cm_id->remote_addr.sin_port, 0);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->dst;

	/* resolve L2 info (false = not a new connect from the ULP) */
	err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr,
			ep->dst, ep->com.dev, false);
	if (err) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = 0;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	/* unwind in reverse order of acquisition */
	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	/*
	 * remember to send notification to upper layer.
	 * We are in here so the upper layer is not aware that this is
	 * re-connect attempt and so, upper layer is still waiting for
	 * response of 1st connect request.
	 */
	connect_reply_upcall(ep, -ECONNRESET);
	c4iw_put_ep(&ep->com);
out:
	return err;
}
/*
 * Handle a CPL_ABORT_REQ_RSS message: the peer (or TP) aborted the
 * connection.
 *
 * Negative-advice aborts are ignored.  Otherwise the endpoint state
 * machine is driven under ep->com.mutex, the ULP is notified as
 * appropriate, an ABORT_RPL (no RST) is returned to hardware, and — when
 * an MPA v2 attempt was rejected — the connection is silently retried
 * with MPA v1 via c4iw_reconnect().
 *
 * Always returns 0 (CPL handlers consume the message).
 */
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct c4iw_qp_attributes attrs;
	int ret;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
		     ep->hwtid);
		return 0;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, this is not needed if com state is just
	 * MPA_REQ_SENT
	 *
	 * NOTE(review): ep->com.state is read and wr_wait woken here
	 * before ep->com.mutex is taken below — looks racy against a
	 * concurrent state transition; confirm ordering requirements.
	 */
	if (ep->com.state != MPA_REQ_SENT)
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		stop_ep_timer(ep);
		if (mpa_rev == 2 && ep->tried_with_mpa_v1)
			connect_reply_upcall(ep, -ECONNRESET);
		else {
			/*
			 * we just don't send notification upwards because we
			 * want to retry with mpa_v1 without upper layers even
			 * knowing it.
			 *
			 * do some housekeeping so as to re-initiate the
			 * connection
			 */
			PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
			     mpa_rev);
			ep->retry_with_mpa_v1 = 1;
		}
		break;
	case MPA_REP_SENT:
		break;
	case MPA_REQ_RCVD:
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		/* flush the QP and tell the ULP the connection died */
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		mutex_unlock(&ep->com.mutex);
		return 0;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		/* we don't release if we want to retry with mpa_v1 */
		if (!ep->retry_with_mpa_v1)
			release = 1;
	}
	mutex_unlock(&ep->com.mutex);

	/* always reply to the abort; get_skb() may reuse the rx skb */
	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;	/* don't send a RST to the peer */
	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);

	/* retry with mpa-v1 */
	if (ep && ep->retry_with_mpa_v1) {
		/* drop the hw tid / route / l2t of the failed attempt */
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		c4iw_reconnect(ep);
	}

	return 0;
}
/*
 * Handle a CPL_CLOSE_CON_RPL message: hardware acknowledged our
 * half-close (FIN sent).
 *
 * In CLOSING we advance to MORIBUND and wait for the peer's FIN; in
 * MORIBUND both directions are closed, so the QP is idled, the ULP gets
 * its close-complete upcall, and the endpoint is released.
 *
 * Always returns 0 (CPL handlers consume the message).
 */
static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);

	ep = lookup_tid(t, tid);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		/* both half-closes done: idle the QP and tear down */
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (release)
		release_ep_resources(ep);
	return 0;
}
2172cfdda9d7SSteve Wise /* The cm_id may be null if we failed to connect */ 21732f5b48c3SSteve Wise mutex_lock(&ep->com.mutex); 2174cfdda9d7SSteve Wise switch (ep->com.state) { 2175cfdda9d7SSteve Wise case CLOSING: 2176cfdda9d7SSteve Wise __state_set(&ep->com, MORIBUND); 2177cfdda9d7SSteve Wise break; 2178cfdda9d7SSteve Wise case MORIBUND: 2179ca5a2202SSteve Wise stop_ep_timer(ep); 2180cfdda9d7SSteve Wise if ((ep->com.cm_id) && (ep->com.qp)) { 2181cfdda9d7SSteve Wise attrs.next_state = C4IW_QP_STATE_IDLE; 2182cfdda9d7SSteve Wise c4iw_modify_qp(ep->com.qp->rhp, 2183cfdda9d7SSteve Wise ep->com.qp, 2184cfdda9d7SSteve Wise C4IW_QP_ATTR_NEXT_STATE, 2185cfdda9d7SSteve Wise &attrs, 1); 2186cfdda9d7SSteve Wise } 2187cfdda9d7SSteve Wise close_complete_upcall(ep); 2188cfdda9d7SSteve Wise __state_set(&ep->com, DEAD); 2189cfdda9d7SSteve Wise release = 1; 2190cfdda9d7SSteve Wise break; 2191cfdda9d7SSteve Wise case ABORTING: 2192cfdda9d7SSteve Wise case DEAD: 2193cfdda9d7SSteve Wise break; 2194cfdda9d7SSteve Wise default: 2195cfdda9d7SSteve Wise BUG_ON(1); 2196cfdda9d7SSteve Wise break; 2197cfdda9d7SSteve Wise } 21982f5b48c3SSteve Wise mutex_unlock(&ep->com.mutex); 2199cfdda9d7SSteve Wise if (release) 2200cfdda9d7SSteve Wise release_ep_resources(ep); 2201cfdda9d7SSteve Wise return 0; 2202cfdda9d7SSteve Wise } 2203cfdda9d7SSteve Wise 2204cfdda9d7SSteve Wise static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) 2205cfdda9d7SSteve Wise { 22060e42c1f4SSteve Wise struct cpl_rdma_terminate *rpl = cplhdr(skb); 2207cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 22080e42c1f4SSteve Wise unsigned int tid = GET_TID(rpl); 22090e42c1f4SSteve Wise struct c4iw_ep *ep; 22100e42c1f4SSteve Wise struct c4iw_qp_attributes attrs; 2211cfdda9d7SSteve Wise 2212cfdda9d7SSteve Wise ep = lookup_tid(t, tid); 22130e42c1f4SSteve Wise BUG_ON(!ep); 2214cfdda9d7SSteve Wise 221530c95c2dSSteve Wise if (ep && ep->com.qp) { 22160e42c1f4SSteve Wise printk(KERN_WARNING MOD "TERM received tid 
%u qpid %u\n", tid, 22170e42c1f4SSteve Wise ep->com.qp->wq.sq.qid); 22180e42c1f4SSteve Wise attrs.next_state = C4IW_QP_STATE_TERMINATE; 22190e42c1f4SSteve Wise c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 22200e42c1f4SSteve Wise C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 22210e42c1f4SSteve Wise } else 222230c95c2dSSteve Wise printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); 2223cfdda9d7SSteve Wise 2224cfdda9d7SSteve Wise return 0; 2225cfdda9d7SSteve Wise } 2226cfdda9d7SSteve Wise 2227cfdda9d7SSteve Wise /* 2228cfdda9d7SSteve Wise * Upcall from the adapter indicating data has been transmitted. 2229cfdda9d7SSteve Wise * For us its just the single MPA request or reply. We can now free 2230cfdda9d7SSteve Wise * the skb holding the mpa message. 2231cfdda9d7SSteve Wise */ 2232cfdda9d7SSteve Wise static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) 2233cfdda9d7SSteve Wise { 2234cfdda9d7SSteve Wise struct c4iw_ep *ep; 2235cfdda9d7SSteve Wise struct cpl_fw4_ack *hdr = cplhdr(skb); 2236cfdda9d7SSteve Wise u8 credits = hdr->credits; 2237cfdda9d7SSteve Wise unsigned int tid = GET_TID(hdr); 2238cfdda9d7SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 2239cfdda9d7SSteve Wise 2240cfdda9d7SSteve Wise 2241cfdda9d7SSteve Wise ep = lookup_tid(t, tid); 2242cfdda9d7SSteve Wise PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); 2243cfdda9d7SSteve Wise if (credits == 0) { 2244aa1ad260SJoe Perches PDBG("%s 0 credit ack ep %p tid %u state %u\n", 2245cfdda9d7SSteve Wise __func__, ep, ep->hwtid, state_read(&ep->com)); 2246cfdda9d7SSteve Wise return 0; 2247cfdda9d7SSteve Wise } 2248cfdda9d7SSteve Wise 2249cfdda9d7SSteve Wise dst_confirm(ep->dst); 2250cfdda9d7SSteve Wise if (ep->mpa_skb) { 2251cfdda9d7SSteve Wise PDBG("%s last streaming msg ack ep %p tid %u state %u " 2252cfdda9d7SSteve Wise "initiator %u freeing skb\n", __func__, ep, ep->hwtid, 2253cfdda9d7SSteve Wise state_read(&ep->com), ep->mpa_attr.initiator ? 
/*
 * iw_cm reject callback: refuse a pending connection request.
 *
 * With MPA rev 0 there is no reject message, so the connection is simply
 * aborted; otherwise an MPA reject (carrying the ULP's private data) is
 * sent and an orderly disconnect started.  Drops the CR reference taken
 * when the request was delivered.
 */
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct c4iw_ep *ep = to_ep(cm_id);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		c4iw_put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		/* NOTE(review): the send_mpa_reject() result is
		 * immediately overwritten and err is never returned —
		 * confirm whether failures should be reported. */
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	}
	c4iw_put_ep(&ep->com);
	return 0;
}

/*
 * iw_cm accept callback: accept a pending connection request.
 *
 * Validates the requested IRD/ORD against the device limit, performs the
 * MPA v2 enhanced-RDMA IRD/ORD negotiation (aborting with an MPA reject
 * when the peer's demands cannot be met), binds the QP to the endpoint,
 * moves it to RTS, and sends the MPA reply.
 *
 * Returns 0 on success or a negative errno; on any failure the
 * connection is aborted/unwound and the CR reference is dropped.
 */
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD) {
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
	}

	/* MPA v2 enhanced-RDMA: negotiate IRD/ORD with the peer's values */
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		if (conn_param->ord > ep->ird) {
			/* we cannot satisfy the peer's IRD: reject */
			ep->ird = conn_param->ird;
			ep->ord = conn_param->ord;
			send_mpa_reject(ep, conn_param->private_data,
					conn_param->private_data_len);
			abort_connection(ep, NULL, GFP_KERNEL);
			err = -ENOMEM;
			goto err;
		}
		if (conn_param->ird > ep->ord) {
			if (!ep->ord)
				conn_param->ird = 1;
			else {
				abort_connection(ep, NULL, GFP_KERNEL);
				err = -ENOMEM;
				goto err;
			}
		}

	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	/* pre-v2 peer-to-peer mode needs at least one inbound read slot */
	if (ep->mpa_attr.version != 2)
		if (peer2peer && ep->ird == 0)
			ep->ird = 1;

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE |
	       C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD |
	       C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err1;
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err1;

	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	c4iw_put_ep(&ep->com);
	return 0;
err1:
	/* undo the QP binding and cm_id reference taken above */
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
err:
	c4iw_put_ep(&ep->com);
	return err;
}
/*
 * iw_cm connect callback: initiate an active (client-side) connection.
 *
 * Allocates an endpoint, an active TID, a route, and L2T state, then
 * sends the connect request to the adapter.  On failure every resource
 * is unwound in reverse order via the goto ladder.
 *
 * Returns 0 on success (completion is reported later via
 * connect_reply_upcall) or a negative errno.
 */
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep;
	struct rtable *rt;
	int err = 0;

	/* requested read depths must fit the device limit */
	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	/* stash the ULP's private data after the MPA header template */
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	/* peer-to-peer mode needs at least one outbound read slot */
	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	cm_id->add_ref(cm_id);
	ep->com.dev = dev;
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(dev, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
	     ntohl(cm_id->local_addr.sin_addr.s_addr),
	     ntohs(cm_id->local_addr.sin_port),
	     ntohl(cm_id->remote_addr.sin_addr.s_addr),
	     ntohs(cm_id->remote_addr.sin_port));

	/* find a route */
	rt = find_route(dev,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, 0);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->dst;

	/* resolve L2 info (true = fresh connect from the ULP) */
	err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr,
			ep->dst, ep->com.dev, true);
	if (err) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = 0;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	/* unwind in reverse order of acquisition */
	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
out:
	return err;
}
/*
 * iw_cm listen callback: create a passive (server-side) listening
 * endpoint.
 *
 * Allocates a listen ep and a server TID, asks the adapter to create the
 * server, then sleeps until the CPL_PASS_OPEN_RPL arrives (hence
 * might_sleep()).  On failure the stid and references are unwound.
 *
 * Returns 0 on success (cm_id->provider_data is set to the ep) or a
 * negative errno.
 */
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;


	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.dev = dev;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	c4iw_init_wr_wait(&ep->com.wr_wait);
	err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
				  ep->com.local_addr.sin_addr.s_addr,
				  ep->com.local_addr.sin_port,
				  ep->com.dev->rdev.lldi.rxq_ids[0]);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
				  __func__);
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
fail1:
out:
	return err;
}
/*
 * iw_cm listen-destroy callback: tear down a listening endpoint.
 *
 * Marks the ep DEAD, asks the adapter to stop the server, waits for the
 * CPL_CLOSE_LISTSRV_RPL (hence might_sleep()), frees the server TID, and
 * drops the references taken at create time.
 *
 * Returns 0 or the error from listen_stop()/the reply wait.
 */
int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	c4iw_init_wr_wait(&ep->com.wr_wait);
	err = listen_stop(ep);
	if (err)
		goto done;
	err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
				  __func__);
	/* the stid is freed even if the wait reported an error */
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
done:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
	return err;
}
/*
 * Initiate a disconnect on an endpoint.
 *
 * @ep:     the endpoint to close
 * @abrupt: non-zero to abort (send RST) instead of an orderly half-close
 * @gfp:    allocation flags for the close/abort message
 *
 * Drives the state machine under ep->com.mutex.  CLOSE_SENT in
 * ep->com.flags guarantees exactly one close/abort is ever sent.  If the
 * device is in a fatal error state, or sending the message fails, the
 * endpoint's resources are released immediately.
 *
 * Returns 0 or the error from sending the close/abort.
 */
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	int close = 0;	/* set when a close/abort must be sent */
	int fatal = 0;	/* set when the ep must be torn down now */
	struct c4iw_rdev *rdev;

	mutex_lock(&ep->com.mutex);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	rdev = &ep->com.dev->rdev;
	if (c4iw_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		/* only escalate if no close was sent yet */
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		/* already closing/closed: nothing to do */
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	if (close) {
		if (abrupt) {
			close_complete_upcall(ep);
			ret = send_abort(ep, NULL, gfp);
		} else
			ret = send_halfclose(ep, gfp);
		if (ret)
			fatal = 1;
	}
	mutex_unlock(&ep->com.mutex);
	if (fatal)
		release_ep_resources(ep);
	return ret;
}
/*
 * Handle a CPL_FW6_MSG carrying an asynchronous event: hand the embedded
 * CQE to the event dispatcher.  Always returns 0.
 */
static int async_event(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
	return 0;
}

/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack,
	[CPL_FW6_MSG] = async_event
};
2634be4c9badSRoland Dreier */ 2635be4c9badSRoland Dreier static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = { 2636be4c9badSRoland Dreier [CPL_ACT_ESTABLISH] = act_establish, 2637be4c9badSRoland Dreier [CPL_ACT_OPEN_RPL] = act_open_rpl, 2638be4c9badSRoland Dreier [CPL_RX_DATA] = rx_data, 2639be4c9badSRoland Dreier [CPL_ABORT_RPL_RSS] = abort_rpl, 2640be4c9badSRoland Dreier [CPL_ABORT_RPL] = abort_rpl, 2641be4c9badSRoland Dreier [CPL_PASS_OPEN_RPL] = pass_open_rpl, 2642be4c9badSRoland Dreier [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl, 2643be4c9badSRoland Dreier [CPL_PASS_ACCEPT_REQ] = pass_accept_req, 2644be4c9badSRoland Dreier [CPL_PASS_ESTABLISH] = pass_establish, 2645be4c9badSRoland Dreier [CPL_PEER_CLOSE] = peer_close, 2646be4c9badSRoland Dreier [CPL_ABORT_REQ_RSS] = peer_abort, 2647be4c9badSRoland Dreier [CPL_CLOSE_CON_RPL] = close_con_rpl, 2648be4c9badSRoland Dreier [CPL_RDMA_TERMINATE] = terminate, 26492f5b48c3SSteve Wise [CPL_FW4_ACK] = fw4_ack, 26502f5b48c3SSteve Wise [CPL_FW6_MSG] = async_event 2651be4c9badSRoland Dreier }; 2652be4c9badSRoland Dreier 2653be4c9badSRoland Dreier static void process_timeout(struct c4iw_ep *ep) 2654be4c9badSRoland Dreier { 2655be4c9badSRoland Dreier struct c4iw_qp_attributes attrs; 2656be4c9badSRoland Dreier int abort = 1; 2657be4c9badSRoland Dreier 26582f5b48c3SSteve Wise mutex_lock(&ep->com.mutex); 2659be4c9badSRoland Dreier PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, 2660be4c9badSRoland Dreier ep->com.state); 2661be4c9badSRoland Dreier switch (ep->com.state) { 2662be4c9badSRoland Dreier case MPA_REQ_SENT: 2663be4c9badSRoland Dreier __state_set(&ep->com, ABORTING); 2664be4c9badSRoland Dreier connect_reply_upcall(ep, -ETIMEDOUT); 2665be4c9badSRoland Dreier break; 2666be4c9badSRoland Dreier case MPA_REQ_WAIT: 2667be4c9badSRoland Dreier __state_set(&ep->com, ABORTING); 2668be4c9badSRoland Dreier break; 2669be4c9badSRoland Dreier case CLOSING: 2670be4c9badSRoland Dreier case MORIBUND: 2671be4c9badSRoland 
Dreier if (ep->com.cm_id && ep->com.qp) { 2672be4c9badSRoland Dreier attrs.next_state = C4IW_QP_STATE_ERROR; 2673be4c9badSRoland Dreier c4iw_modify_qp(ep->com.qp->rhp, 2674be4c9badSRoland Dreier ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 2675be4c9badSRoland Dreier &attrs, 1); 2676be4c9badSRoland Dreier } 2677be4c9badSRoland Dreier __state_set(&ep->com, ABORTING); 2678be4c9badSRoland Dreier break; 2679be4c9badSRoland Dreier default: 268076f267b7SJulia Lawall WARN(1, "%s unexpected state ep %p tid %u state %u\n", 2681be4c9badSRoland Dreier __func__, ep, ep->hwtid, ep->com.state); 2682be4c9badSRoland Dreier abort = 0; 2683be4c9badSRoland Dreier } 26842f5b48c3SSteve Wise mutex_unlock(&ep->com.mutex); 2685be4c9badSRoland Dreier if (abort) 2686be4c9badSRoland Dreier abort_connection(ep, NULL, GFP_KERNEL); 2687be4c9badSRoland Dreier c4iw_put_ep(&ep->com); 2688be4c9badSRoland Dreier } 2689be4c9badSRoland Dreier 2690be4c9badSRoland Dreier static void process_timedout_eps(void) 2691be4c9badSRoland Dreier { 2692be4c9badSRoland Dreier struct c4iw_ep *ep; 2693be4c9badSRoland Dreier 2694be4c9badSRoland Dreier spin_lock_irq(&timeout_lock); 2695be4c9badSRoland Dreier while (!list_empty(&timeout_list)) { 2696be4c9badSRoland Dreier struct list_head *tmp; 2697be4c9badSRoland Dreier 2698be4c9badSRoland Dreier tmp = timeout_list.next; 2699be4c9badSRoland Dreier list_del(tmp); 2700be4c9badSRoland Dreier spin_unlock_irq(&timeout_lock); 2701be4c9badSRoland Dreier ep = list_entry(tmp, struct c4iw_ep, entry); 2702be4c9badSRoland Dreier process_timeout(ep); 2703be4c9badSRoland Dreier spin_lock_irq(&timeout_lock); 2704be4c9badSRoland Dreier } 2705be4c9badSRoland Dreier spin_unlock_irq(&timeout_lock); 2706be4c9badSRoland Dreier } 2707be4c9badSRoland Dreier 2708be4c9badSRoland Dreier static void process_work(struct work_struct *work) 2709be4c9badSRoland Dreier { 2710be4c9badSRoland Dreier struct sk_buff *skb = NULL; 2711be4c9badSRoland Dreier struct c4iw_dev *dev; 2712c1d7356cSDan Carpenter struct 
cpl_act_establish *rpl; 2713be4c9badSRoland Dreier unsigned int opcode; 2714be4c9badSRoland Dreier int ret; 2715be4c9badSRoland Dreier 2716be4c9badSRoland Dreier while ((skb = skb_dequeue(&rxq))) { 2717be4c9badSRoland Dreier rpl = cplhdr(skb); 2718be4c9badSRoland Dreier dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); 2719be4c9badSRoland Dreier opcode = rpl->ot.opcode; 2720be4c9badSRoland Dreier 2721be4c9badSRoland Dreier BUG_ON(!work_handlers[opcode]); 2722be4c9badSRoland Dreier ret = work_handlers[opcode](dev, skb); 2723be4c9badSRoland Dreier if (!ret) 2724be4c9badSRoland Dreier kfree_skb(skb); 2725be4c9badSRoland Dreier } 2726be4c9badSRoland Dreier process_timedout_eps(); 2727be4c9badSRoland Dreier } 2728be4c9badSRoland Dreier 2729be4c9badSRoland Dreier static DECLARE_WORK(skb_work, process_work); 2730be4c9badSRoland Dreier 2731be4c9badSRoland Dreier static void ep_timeout(unsigned long arg) 2732be4c9badSRoland Dreier { 2733be4c9badSRoland Dreier struct c4iw_ep *ep = (struct c4iw_ep *)arg; 2734be4c9badSRoland Dreier 2735be4c9badSRoland Dreier spin_lock(&timeout_lock); 2736be4c9badSRoland Dreier list_add_tail(&ep->entry, &timeout_list); 2737be4c9badSRoland Dreier spin_unlock(&timeout_lock); 2738be4c9badSRoland Dreier queue_work(workq, &skb_work); 2739be4c9badSRoland Dreier } 2740be4c9badSRoland Dreier 2741be4c9badSRoland Dreier /* 2742cfdda9d7SSteve Wise * All the CM events are handled on a work queue to have a safe context. 2743cfdda9d7SSteve Wise */ 2744cfdda9d7SSteve Wise static int sched(struct c4iw_dev *dev, struct sk_buff *skb) 2745cfdda9d7SSteve Wise { 2746cfdda9d7SSteve Wise 2747cfdda9d7SSteve Wise /* 2748cfdda9d7SSteve Wise * Save dev in the skb->cb area. 2749cfdda9d7SSteve Wise */ 2750cfdda9d7SSteve Wise *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev; 2751cfdda9d7SSteve Wise 2752cfdda9d7SSteve Wise /* 2753cfdda9d7SSteve Wise * Queue the skb and schedule the worker thread. 
2754cfdda9d7SSteve Wise */ 2755cfdda9d7SSteve Wise skb_queue_tail(&rxq, skb); 2756cfdda9d7SSteve Wise queue_work(workq, &skb_work); 2757cfdda9d7SSteve Wise return 0; 2758cfdda9d7SSteve Wise } 2759cfdda9d7SSteve Wise 2760cfdda9d7SSteve Wise static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2761cfdda9d7SSteve Wise { 2762cfdda9d7SSteve Wise struct cpl_set_tcb_rpl *rpl = cplhdr(skb); 2763cfdda9d7SSteve Wise 2764cfdda9d7SSteve Wise if (rpl->status != CPL_ERR_NONE) { 2765cfdda9d7SSteve Wise printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u " 2766cfdda9d7SSteve Wise "for tid %u\n", rpl->status, GET_TID(rpl)); 2767cfdda9d7SSteve Wise } 27682f5b48c3SSteve Wise kfree_skb(skb); 2769cfdda9d7SSteve Wise return 0; 2770cfdda9d7SSteve Wise } 2771cfdda9d7SSteve Wise 2772be4c9badSRoland Dreier static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) 2773be4c9badSRoland Dreier { 2774be4c9badSRoland Dreier struct cpl_fw6_msg *rpl = cplhdr(skb); 2775be4c9badSRoland Dreier struct c4iw_wr_wait *wr_waitp; 2776be4c9badSRoland Dreier int ret; 27775be78ee9SVipul Pandya u8 opcode; 27785be78ee9SVipul Pandya struct cpl_fw6_msg_ofld_connection_wr_rpl *req; 27795be78ee9SVipul Pandya struct c4iw_ep *ep; 2780be4c9badSRoland Dreier 2781be4c9badSRoland Dreier PDBG("%s type %u\n", __func__, rpl->type); 2782be4c9badSRoland Dreier 2783be4c9badSRoland Dreier switch (rpl->type) { 27845be78ee9SVipul Pandya case FW6_TYPE_WR_RPL: 2785be4c9badSRoland Dreier ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); 2786c8e081a1SRoland Dreier wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; 2787be4c9badSRoland Dreier PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); 2788d9594d99SSteve Wise if (wr_waitp) 2789d9594d99SSteve Wise c4iw_wake_up(wr_waitp, ret ? 
-ret : 0); 27902f5b48c3SSteve Wise kfree_skb(skb); 2791be4c9badSRoland Dreier break; 27925be78ee9SVipul Pandya case FW6_TYPE_CQE: 27932f5b48c3SSteve Wise sched(dev, skb); 2794be4c9badSRoland Dreier break; 27955be78ee9SVipul Pandya case FW6_TYPE_OFLD_CONNECTION_WR_RPL: 27965be78ee9SVipul Pandya opcode = *(const u8 *)rpl->data; 27975be78ee9SVipul Pandya if (opcode == FW_OFLD_CONNECTION_WR) { 27985be78ee9SVipul Pandya req = 27995be78ee9SVipul Pandya (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data; 28005be78ee9SVipul Pandya if (req->t_state == TCP_SYN_SENT 28015be78ee9SVipul Pandya && (req->retval == FW_ENOMEM 28025be78ee9SVipul Pandya || req->retval == FW_EADDRINUSE)) { 28035be78ee9SVipul Pandya ep = (struct c4iw_ep *) 28045be78ee9SVipul Pandya lookup_atid(dev->rdev.lldi.tids, 28055be78ee9SVipul Pandya req->tid); 28065be78ee9SVipul Pandya c4iw_l2t_send(&dev->rdev, skb, ep->l2t); 28075be78ee9SVipul Pandya return 0; 28085be78ee9SVipul Pandya } 28095be78ee9SVipul Pandya } 28105be78ee9SVipul Pandya break; 2811be4c9badSRoland Dreier default: 2812be4c9badSRoland Dreier printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__, 2813be4c9badSRoland Dreier rpl->type); 28142f5b48c3SSteve Wise kfree_skb(skb); 2815be4c9badSRoland Dreier break; 2816be4c9badSRoland Dreier } 2817be4c9badSRoland Dreier return 0; 2818be4c9badSRoland Dreier } 2819be4c9badSRoland Dreier 28208da7e7a5SSteve Wise static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) 28218da7e7a5SSteve Wise { 28228da7e7a5SSteve Wise struct cpl_abort_req_rss *req = cplhdr(skb); 28238da7e7a5SSteve Wise struct c4iw_ep *ep; 28248da7e7a5SSteve Wise struct tid_info *t = dev->rdev.lldi.tids; 28258da7e7a5SSteve Wise unsigned int tid = GET_TID(req); 28268da7e7a5SSteve Wise 28278da7e7a5SSteve Wise ep = lookup_tid(t, tid); 282814b92228SSteve Wise if (!ep) { 282914b92228SSteve Wise printk(KERN_WARNING MOD 283014b92228SSteve Wise "Abort on non-existent endpoint, tid %d\n", tid); 283114b92228SSteve Wise 
kfree_skb(skb); 283214b92228SSteve Wise return 0; 283314b92228SSteve Wise } 28348da7e7a5SSteve Wise if (is_neg_adv_abort(req->status)) { 28358da7e7a5SSteve Wise PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, 28368da7e7a5SSteve Wise ep->hwtid); 28378da7e7a5SSteve Wise kfree_skb(skb); 28388da7e7a5SSteve Wise return 0; 28398da7e7a5SSteve Wise } 28408da7e7a5SSteve Wise PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, 28418da7e7a5SSteve Wise ep->com.state); 28428da7e7a5SSteve Wise 28438da7e7a5SSteve Wise /* 28448da7e7a5SSteve Wise * Wake up any threads in rdma_init() or rdma_fini(). 28458da7e7a5SSteve Wise */ 28468da7e7a5SSteve Wise c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 28478da7e7a5SSteve Wise sched(dev, skb); 28488da7e7a5SSteve Wise return 0; 28498da7e7a5SSteve Wise } 28508da7e7a5SSteve Wise 2851be4c9badSRoland Dreier /* 2852be4c9badSRoland Dreier * Most upcalls from the T4 Core go to sched() to 2853be4c9badSRoland Dreier * schedule the processing on a work queue. 2854be4c9badSRoland Dreier */ 2855be4c9badSRoland Dreier c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = { 2856be4c9badSRoland Dreier [CPL_ACT_ESTABLISH] = sched, 2857be4c9badSRoland Dreier [CPL_ACT_OPEN_RPL] = sched, 2858be4c9badSRoland Dreier [CPL_RX_DATA] = sched, 2859be4c9badSRoland Dreier [CPL_ABORT_RPL_RSS] = sched, 2860be4c9badSRoland Dreier [CPL_ABORT_RPL] = sched, 2861be4c9badSRoland Dreier [CPL_PASS_OPEN_RPL] = sched, 2862be4c9badSRoland Dreier [CPL_CLOSE_LISTSRV_RPL] = sched, 2863be4c9badSRoland Dreier [CPL_PASS_ACCEPT_REQ] = sched, 2864be4c9badSRoland Dreier [CPL_PASS_ESTABLISH] = sched, 2865be4c9badSRoland Dreier [CPL_PEER_CLOSE] = sched, 2866be4c9badSRoland Dreier [CPL_CLOSE_CON_RPL] = sched, 28678da7e7a5SSteve Wise [CPL_ABORT_REQ_RSS] = peer_abort_intr, 2868be4c9badSRoland Dreier [CPL_RDMA_TERMINATE] = sched, 2869be4c9badSRoland Dreier [CPL_FW4_ACK] = sched, 2870be4c9badSRoland Dreier [CPL_SET_TCB_RPL] = set_tcb_rpl, 2871be4c9badSRoland Dreier [CPL_FW6_MSG] = fw6_msg 
2872be4c9badSRoland Dreier }; 2873be4c9badSRoland Dreier 2874cfdda9d7SSteve Wise int __init c4iw_cm_init(void) 2875cfdda9d7SSteve Wise { 2876be4c9badSRoland Dreier spin_lock_init(&timeout_lock); 2877cfdda9d7SSteve Wise skb_queue_head_init(&rxq); 2878cfdda9d7SSteve Wise 2879cfdda9d7SSteve Wise workq = create_singlethread_workqueue("iw_cxgb4"); 2880cfdda9d7SSteve Wise if (!workq) 2881cfdda9d7SSteve Wise return -ENOMEM; 2882cfdda9d7SSteve Wise 2883cfdda9d7SSteve Wise return 0; 2884cfdda9d7SSteve Wise } 2885cfdda9d7SSteve Wise 2886cfdda9d7SSteve Wise void __exit c4iw_cm_term(void) 2887cfdda9d7SSteve Wise { 2888be4c9badSRoland Dreier WARN_ON(!list_empty(&timeout_list)); 2889cfdda9d7SSteve Wise flush_workqueue(workq); 2890cfdda9d7SSteve Wise destroy_workqueue(workq); 2891cfdda9d7SSteve Wise } 2892