1f7917c00SJeff Kirsher /* 2f7917c00SJeff Kirsher * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved. 3f7917c00SJeff Kirsher * 4f7917c00SJeff Kirsher * This software is available to you under a choice of one of two 5f7917c00SJeff Kirsher * licenses. You may choose to be licensed under the terms of the GNU 6f7917c00SJeff Kirsher * General Public License (GPL) Version 2, available from the file 7f7917c00SJeff Kirsher * COPYING in the main directory of this source tree, or the 8f7917c00SJeff Kirsher * OpenIB.org BSD license below: 9f7917c00SJeff Kirsher * 10f7917c00SJeff Kirsher * Redistribution and use in source and binary forms, with or 11f7917c00SJeff Kirsher * without modification, are permitted provided that the following 12f7917c00SJeff Kirsher * conditions are met: 13f7917c00SJeff Kirsher * 14f7917c00SJeff Kirsher * - Redistributions of source code must retain the above 15f7917c00SJeff Kirsher * copyright notice, this list of conditions and the following 16f7917c00SJeff Kirsher * disclaimer. 17f7917c00SJeff Kirsher * 18f7917c00SJeff Kirsher * - Redistributions in binary form must reproduce the above 19f7917c00SJeff Kirsher * copyright notice, this list of conditions and the following 20f7917c00SJeff Kirsher * disclaimer in the documentation and/or other materials 21f7917c00SJeff Kirsher * provided with the distribution. 22f7917c00SJeff Kirsher * 23f7917c00SJeff Kirsher * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24f7917c00SJeff Kirsher * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25f7917c00SJeff Kirsher * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26f7917c00SJeff Kirsher * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27f7917c00SJeff Kirsher * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28f7917c00SJeff Kirsher * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29f7917c00SJeff Kirsher * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30f7917c00SJeff Kirsher * SOFTWARE. 31f7917c00SJeff Kirsher */ 32f7917c00SJeff Kirsher 33f7917c00SJeff Kirsher #include <linux/list.h> 34f7917c00SJeff Kirsher #include <linux/slab.h> 35f7917c00SJeff Kirsher #include <net/neighbour.h> 36f7917c00SJeff Kirsher #include <linux/notifier.h> 37f7917c00SJeff Kirsher #include <linux/atomic.h> 38f7917c00SJeff Kirsher #include <linux/proc_fs.h> 39f7917c00SJeff Kirsher #include <linux/if_vlan.h> 40f7917c00SJeff Kirsher #include <net/netevent.h> 41f7917c00SJeff Kirsher #include <linux/highmem.h> 42f7917c00SJeff Kirsher #include <linux/vmalloc.h> 43ee40fa06SPaul Gortmaker #include <linux/export.h> 44f7917c00SJeff Kirsher 45f7917c00SJeff Kirsher #include "common.h" 46f7917c00SJeff Kirsher #include "regs.h" 47f7917c00SJeff Kirsher #include "cxgb3_ioctl.h" 48f7917c00SJeff Kirsher #include "cxgb3_ctl_defs.h" 49f7917c00SJeff Kirsher #include "cxgb3_defs.h" 50f7917c00SJeff Kirsher #include "l2t.h" 51f7917c00SJeff Kirsher #include "firmware_exports.h" 52f7917c00SJeff Kirsher #include "cxgb3_offload.h" 53f7917c00SJeff Kirsher 54f7917c00SJeff Kirsher static LIST_HEAD(client_list); 55f7917c00SJeff Kirsher static LIST_HEAD(ofld_dev_list); 56f7917c00SJeff Kirsher static DEFINE_MUTEX(cxgb3_db_lock); 57f7917c00SJeff Kirsher 58f7917c00SJeff Kirsher static DEFINE_RWLOCK(adapter_list_lock); 59f7917c00SJeff Kirsher static LIST_HEAD(adapter_list); 60f7917c00SJeff Kirsher 61f7917c00SJeff Kirsher static const unsigned int MAX_ATIDS = 64 * 1024; 62f7917c00SJeff Kirsher static const unsigned int ATID_BASE = 0x10000; 63f7917c00SJeff Kirsher 64f7917c00SJeff Kirsher static void cxgb_neigh_update(struct neighbour *neigh); 
65f7917c00SJeff Kirsher static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new); 66f7917c00SJeff Kirsher 67f7917c00SJeff Kirsher static inline int offload_activated(struct t3cdev *tdev) 68f7917c00SJeff Kirsher { 69f7917c00SJeff Kirsher const struct adapter *adapter = tdev2adap(tdev); 70f7917c00SJeff Kirsher 71f7917c00SJeff Kirsher return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map); 72f7917c00SJeff Kirsher } 73f7917c00SJeff Kirsher 74f7917c00SJeff Kirsher /** 75f7917c00SJeff Kirsher * cxgb3_register_client - register an offload client 76f7917c00SJeff Kirsher * @client: the client 77f7917c00SJeff Kirsher * 78f7917c00SJeff Kirsher * Add the client to the client list, 79f7917c00SJeff Kirsher * and call backs the client for each activated offload device 80f7917c00SJeff Kirsher */ 81f7917c00SJeff Kirsher void cxgb3_register_client(struct cxgb3_client *client) 82f7917c00SJeff Kirsher { 83f7917c00SJeff Kirsher struct t3cdev *tdev; 84f7917c00SJeff Kirsher 85f7917c00SJeff Kirsher mutex_lock(&cxgb3_db_lock); 86f7917c00SJeff Kirsher list_add_tail(&client->client_list, &client_list); 87f7917c00SJeff Kirsher 88f7917c00SJeff Kirsher if (client->add) { 89f7917c00SJeff Kirsher list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) { 90f7917c00SJeff Kirsher if (offload_activated(tdev)) 91f7917c00SJeff Kirsher client->add(tdev); 92f7917c00SJeff Kirsher } 93f7917c00SJeff Kirsher } 94f7917c00SJeff Kirsher mutex_unlock(&cxgb3_db_lock); 95f7917c00SJeff Kirsher } 96f7917c00SJeff Kirsher 97f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb3_register_client); 98f7917c00SJeff Kirsher 99f7917c00SJeff Kirsher /** 100f7917c00SJeff Kirsher * cxgb3_unregister_client - unregister an offload client 101f7917c00SJeff Kirsher * @client: the client 102f7917c00SJeff Kirsher * 103f7917c00SJeff Kirsher * Remove the client to the client list, 104f7917c00SJeff Kirsher * and call backs the client for each activated offload device. 
105f7917c00SJeff Kirsher */ 106f7917c00SJeff Kirsher void cxgb3_unregister_client(struct cxgb3_client *client) 107f7917c00SJeff Kirsher { 108f7917c00SJeff Kirsher struct t3cdev *tdev; 109f7917c00SJeff Kirsher 110f7917c00SJeff Kirsher mutex_lock(&cxgb3_db_lock); 111f7917c00SJeff Kirsher list_del(&client->client_list); 112f7917c00SJeff Kirsher 113f7917c00SJeff Kirsher if (client->remove) { 114f7917c00SJeff Kirsher list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) { 115f7917c00SJeff Kirsher if (offload_activated(tdev)) 116f7917c00SJeff Kirsher client->remove(tdev); 117f7917c00SJeff Kirsher } 118f7917c00SJeff Kirsher } 119f7917c00SJeff Kirsher mutex_unlock(&cxgb3_db_lock); 120f7917c00SJeff Kirsher } 121f7917c00SJeff Kirsher 122f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb3_unregister_client); 123f7917c00SJeff Kirsher 124f7917c00SJeff Kirsher /** 125f7917c00SJeff Kirsher * cxgb3_add_clients - activate registered clients for an offload device 126f7917c00SJeff Kirsher * @tdev: the offload device 127f7917c00SJeff Kirsher * 128f7917c00SJeff Kirsher * Call backs all registered clients once a offload device is activated 129f7917c00SJeff Kirsher */ 130f7917c00SJeff Kirsher void cxgb3_add_clients(struct t3cdev *tdev) 131f7917c00SJeff Kirsher { 132f7917c00SJeff Kirsher struct cxgb3_client *client; 133f7917c00SJeff Kirsher 134f7917c00SJeff Kirsher mutex_lock(&cxgb3_db_lock); 135f7917c00SJeff Kirsher list_for_each_entry(client, &client_list, client_list) { 136f7917c00SJeff Kirsher if (client->add) 137f7917c00SJeff Kirsher client->add(tdev); 138f7917c00SJeff Kirsher } 139f7917c00SJeff Kirsher mutex_unlock(&cxgb3_db_lock); 140f7917c00SJeff Kirsher } 141f7917c00SJeff Kirsher 142f7917c00SJeff Kirsher /** 143f7917c00SJeff Kirsher * cxgb3_remove_clients - deactivates registered clients 144f7917c00SJeff Kirsher * for an offload device 145f7917c00SJeff Kirsher * @tdev: the offload device 146f7917c00SJeff Kirsher * 147f7917c00SJeff Kirsher * Call backs all registered clients once a 
offload device is deactivated 148f7917c00SJeff Kirsher */ 149f7917c00SJeff Kirsher void cxgb3_remove_clients(struct t3cdev *tdev) 150f7917c00SJeff Kirsher { 151f7917c00SJeff Kirsher struct cxgb3_client *client; 152f7917c00SJeff Kirsher 153f7917c00SJeff Kirsher mutex_lock(&cxgb3_db_lock); 154f7917c00SJeff Kirsher list_for_each_entry(client, &client_list, client_list) { 155f7917c00SJeff Kirsher if (client->remove) 156f7917c00SJeff Kirsher client->remove(tdev); 157f7917c00SJeff Kirsher } 158f7917c00SJeff Kirsher mutex_unlock(&cxgb3_db_lock); 159f7917c00SJeff Kirsher } 160f7917c00SJeff Kirsher 161f7917c00SJeff Kirsher void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port) 162f7917c00SJeff Kirsher { 163f7917c00SJeff Kirsher struct cxgb3_client *client; 164f7917c00SJeff Kirsher 165f7917c00SJeff Kirsher mutex_lock(&cxgb3_db_lock); 166f7917c00SJeff Kirsher list_for_each_entry(client, &client_list, client_list) { 167f7917c00SJeff Kirsher if (client->event_handler) 168f7917c00SJeff Kirsher client->event_handler(tdev, event, port); 169f7917c00SJeff Kirsher } 170f7917c00SJeff Kirsher mutex_unlock(&cxgb3_db_lock); 171f7917c00SJeff Kirsher } 172f7917c00SJeff Kirsher 173f7917c00SJeff Kirsher static struct net_device *get_iff_from_mac(struct adapter *adapter, 174f7917c00SJeff Kirsher const unsigned char *mac, 175f7917c00SJeff Kirsher unsigned int vlan) 176f7917c00SJeff Kirsher { 177f7917c00SJeff Kirsher int i; 178f7917c00SJeff Kirsher 179f7917c00SJeff Kirsher for_each_port(adapter, i) { 180f7917c00SJeff Kirsher struct net_device *dev = adapter->port[i]; 181f7917c00SJeff Kirsher 182f7917c00SJeff Kirsher if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) { 183f7917c00SJeff Kirsher if (vlan && vlan != VLAN_VID_MASK) { 184f7917c00SJeff Kirsher rcu_read_lock(); 185f7917c00SJeff Kirsher dev = __vlan_find_dev_deep(dev, vlan); 186f7917c00SJeff Kirsher rcu_read_unlock(); 187f7917c00SJeff Kirsher } else if (netif_is_bond_slave(dev)) { 188f7917c00SJeff Kirsher while (dev->master) 
189f7917c00SJeff Kirsher dev = dev->master; 190f7917c00SJeff Kirsher } 191f7917c00SJeff Kirsher return dev; 192f7917c00SJeff Kirsher } 193f7917c00SJeff Kirsher } 194f7917c00SJeff Kirsher return NULL; 195f7917c00SJeff Kirsher } 196f7917c00SJeff Kirsher 197f7917c00SJeff Kirsher static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req, 198f7917c00SJeff Kirsher void *data) 199f7917c00SJeff Kirsher { 200f7917c00SJeff Kirsher int i; 201f7917c00SJeff Kirsher int ret = 0; 202f7917c00SJeff Kirsher unsigned int val = 0; 203f7917c00SJeff Kirsher struct ulp_iscsi_info *uiip = data; 204f7917c00SJeff Kirsher 205f7917c00SJeff Kirsher switch (req) { 206f7917c00SJeff Kirsher case ULP_ISCSI_GET_PARAMS: 207f7917c00SJeff Kirsher uiip->pdev = adapter->pdev; 208f7917c00SJeff Kirsher uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT); 209f7917c00SJeff Kirsher uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT); 210f7917c00SJeff Kirsher uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK); 211f7917c00SJeff Kirsher 212f7917c00SJeff Kirsher val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ); 213f7917c00SJeff Kirsher for (i = 0; i < 4; i++, val >>= 8) 214f7917c00SJeff Kirsher uiip->pgsz_factor[i] = val & 0xFF; 215f7917c00SJeff Kirsher 216f7917c00SJeff Kirsher val = t3_read_reg(adapter, A_TP_PARA_REG7); 217f7917c00SJeff Kirsher uiip->max_txsz = 218f7917c00SJeff Kirsher uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0)&M_PMMAXXFERLEN0, 219f7917c00SJeff Kirsher (val >> S_PMMAXXFERLEN1)&M_PMMAXXFERLEN1); 220f7917c00SJeff Kirsher /* 221f7917c00SJeff Kirsher * On tx, the iscsi pdu has to be <= tx page size and has to 222f7917c00SJeff Kirsher * fit into the Tx PM FIFO. 
223f7917c00SJeff Kirsher */ 224f7917c00SJeff Kirsher val = min(adapter->params.tp.tx_pg_size, 225f7917c00SJeff Kirsher t3_read_reg(adapter, A_PM1_TX_CFG) >> 17); 226f7917c00SJeff Kirsher uiip->max_txsz = min(val, uiip->max_txsz); 227f7917c00SJeff Kirsher 228f7917c00SJeff Kirsher /* set MaxRxData to 16224 */ 229f7917c00SJeff Kirsher val = t3_read_reg(adapter, A_TP_PARA_REG2); 230f7917c00SJeff Kirsher if ((val >> S_MAXRXDATA) != 0x3f60) { 231f7917c00SJeff Kirsher val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE); 232f7917c00SJeff Kirsher val |= V_MAXRXDATA(0x3f60); 233f7917c00SJeff Kirsher printk(KERN_INFO 234f7917c00SJeff Kirsher "%s, iscsi set MaxRxData to 16224 (0x%x).\n", 235f7917c00SJeff Kirsher adapter->name, val); 236f7917c00SJeff Kirsher t3_write_reg(adapter, A_TP_PARA_REG2, val); 237f7917c00SJeff Kirsher } 238f7917c00SJeff Kirsher 239f7917c00SJeff Kirsher /* 240f7917c00SJeff Kirsher * on rx, the iscsi pdu has to be < rx page size and the 241f7917c00SJeff Kirsher * the max rx data length programmed in TP 242f7917c00SJeff Kirsher */ 243f7917c00SJeff Kirsher val = min(adapter->params.tp.rx_pg_size, 244f7917c00SJeff Kirsher ((t3_read_reg(adapter, A_TP_PARA_REG2)) >> 245f7917c00SJeff Kirsher S_MAXRXDATA) & M_MAXRXDATA); 246f7917c00SJeff Kirsher uiip->max_rxsz = min(val, uiip->max_rxsz); 247f7917c00SJeff Kirsher break; 248f7917c00SJeff Kirsher case ULP_ISCSI_SET_PARAMS: 249f7917c00SJeff Kirsher t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask); 250f7917c00SJeff Kirsher /* program the ddp page sizes */ 251f7917c00SJeff Kirsher for (i = 0; i < 4; i++) 252f7917c00SJeff Kirsher val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i); 253f7917c00SJeff Kirsher if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) { 254f7917c00SJeff Kirsher printk(KERN_INFO 255f7917c00SJeff Kirsher "%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u.\n", 256f7917c00SJeff Kirsher adapter->name, val, uiip->pgsz_factor[0], 257f7917c00SJeff Kirsher uiip->pgsz_factor[1], uiip->pgsz_factor[2], 
258f7917c00SJeff Kirsher uiip->pgsz_factor[3]); 259f7917c00SJeff Kirsher t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val); 260f7917c00SJeff Kirsher } 261f7917c00SJeff Kirsher break; 262f7917c00SJeff Kirsher default: 263f7917c00SJeff Kirsher ret = -EOPNOTSUPP; 264f7917c00SJeff Kirsher } 265f7917c00SJeff Kirsher return ret; 266f7917c00SJeff Kirsher } 267f7917c00SJeff Kirsher 268f7917c00SJeff Kirsher /* Response queue used for RDMA events. */ 269f7917c00SJeff Kirsher #define ASYNC_NOTIF_RSPQ 0 270f7917c00SJeff Kirsher 271f7917c00SJeff Kirsher static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data) 272f7917c00SJeff Kirsher { 273f7917c00SJeff Kirsher int ret = 0; 274f7917c00SJeff Kirsher 275f7917c00SJeff Kirsher switch (req) { 276f7917c00SJeff Kirsher case RDMA_GET_PARAMS: { 277f7917c00SJeff Kirsher struct rdma_info *rdma = data; 278f7917c00SJeff Kirsher struct pci_dev *pdev = adapter->pdev; 279f7917c00SJeff Kirsher 280f7917c00SJeff Kirsher rdma->udbell_physbase = pci_resource_start(pdev, 2); 281f7917c00SJeff Kirsher rdma->udbell_len = pci_resource_len(pdev, 2); 282f7917c00SJeff Kirsher rdma->tpt_base = 283f7917c00SJeff Kirsher t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT); 284f7917c00SJeff Kirsher rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT); 285f7917c00SJeff Kirsher rdma->pbl_base = 286f7917c00SJeff Kirsher t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT); 287f7917c00SJeff Kirsher rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT); 288f7917c00SJeff Kirsher rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT); 289f7917c00SJeff Kirsher rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT); 290f7917c00SJeff Kirsher rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL; 291f7917c00SJeff Kirsher rdma->pdev = pdev; 292f7917c00SJeff Kirsher break; 293f7917c00SJeff Kirsher } 294f7917c00SJeff Kirsher case RDMA_CQ_OP:{ 295f7917c00SJeff Kirsher unsigned long flags; 296f7917c00SJeff Kirsher struct rdma_cq_op *rdma = data; 297f7917c00SJeff Kirsher 
298f7917c00SJeff Kirsher /* may be called in any context */ 299f7917c00SJeff Kirsher spin_lock_irqsave(&adapter->sge.reg_lock, flags); 300f7917c00SJeff Kirsher ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op, 301f7917c00SJeff Kirsher rdma->credits); 302f7917c00SJeff Kirsher spin_unlock_irqrestore(&adapter->sge.reg_lock, flags); 303f7917c00SJeff Kirsher break; 304f7917c00SJeff Kirsher } 305f7917c00SJeff Kirsher case RDMA_GET_MEM:{ 306f7917c00SJeff Kirsher struct ch_mem_range *t = data; 307f7917c00SJeff Kirsher struct mc7 *mem; 308f7917c00SJeff Kirsher 309f7917c00SJeff Kirsher if ((t->addr & 7) || (t->len & 7)) 310f7917c00SJeff Kirsher return -EINVAL; 311f7917c00SJeff Kirsher if (t->mem_id == MEM_CM) 312f7917c00SJeff Kirsher mem = &adapter->cm; 313f7917c00SJeff Kirsher else if (t->mem_id == MEM_PMRX) 314f7917c00SJeff Kirsher mem = &adapter->pmrx; 315f7917c00SJeff Kirsher else if (t->mem_id == MEM_PMTX) 316f7917c00SJeff Kirsher mem = &adapter->pmtx; 317f7917c00SJeff Kirsher else 318f7917c00SJeff Kirsher return -EINVAL; 319f7917c00SJeff Kirsher 320f7917c00SJeff Kirsher ret = 321f7917c00SJeff Kirsher t3_mc7_bd_read(mem, t->addr / 8, t->len / 8, 322f7917c00SJeff Kirsher (u64 *) t->buf); 323f7917c00SJeff Kirsher if (ret) 324f7917c00SJeff Kirsher return ret; 325f7917c00SJeff Kirsher break; 326f7917c00SJeff Kirsher } 327f7917c00SJeff Kirsher case RDMA_CQ_SETUP:{ 328f7917c00SJeff Kirsher struct rdma_cq_setup *rdma = data; 329f7917c00SJeff Kirsher 330f7917c00SJeff Kirsher spin_lock_irq(&adapter->sge.reg_lock); 331f7917c00SJeff Kirsher ret = 332f7917c00SJeff Kirsher t3_sge_init_cqcntxt(adapter, rdma->id, 333f7917c00SJeff Kirsher rdma->base_addr, rdma->size, 334f7917c00SJeff Kirsher ASYNC_NOTIF_RSPQ, 335f7917c00SJeff Kirsher rdma->ovfl_mode, rdma->credits, 336f7917c00SJeff Kirsher rdma->credit_thres); 337f7917c00SJeff Kirsher spin_unlock_irq(&adapter->sge.reg_lock); 338f7917c00SJeff Kirsher break; 339f7917c00SJeff Kirsher } 340f7917c00SJeff Kirsher case RDMA_CQ_DISABLE: 
341f7917c00SJeff Kirsher spin_lock_irq(&adapter->sge.reg_lock); 342f7917c00SJeff Kirsher ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data); 343f7917c00SJeff Kirsher spin_unlock_irq(&adapter->sge.reg_lock); 344f7917c00SJeff Kirsher break; 345f7917c00SJeff Kirsher case RDMA_CTRL_QP_SETUP:{ 346f7917c00SJeff Kirsher struct rdma_ctrlqp_setup *rdma = data; 347f7917c00SJeff Kirsher 348f7917c00SJeff Kirsher spin_lock_irq(&adapter->sge.reg_lock); 349f7917c00SJeff Kirsher ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0, 350f7917c00SJeff Kirsher SGE_CNTXT_RDMA, 351f7917c00SJeff Kirsher ASYNC_NOTIF_RSPQ, 352f7917c00SJeff Kirsher rdma->base_addr, rdma->size, 353f7917c00SJeff Kirsher FW_RI_TID_START, 1, 0); 354f7917c00SJeff Kirsher spin_unlock_irq(&adapter->sge.reg_lock); 355f7917c00SJeff Kirsher break; 356f7917c00SJeff Kirsher } 357f7917c00SJeff Kirsher case RDMA_GET_MIB: { 358f7917c00SJeff Kirsher spin_lock(&adapter->stats_lock); 359f7917c00SJeff Kirsher t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data); 360f7917c00SJeff Kirsher spin_unlock(&adapter->stats_lock); 361f7917c00SJeff Kirsher break; 362f7917c00SJeff Kirsher } 363f7917c00SJeff Kirsher default: 364f7917c00SJeff Kirsher ret = -EOPNOTSUPP; 365f7917c00SJeff Kirsher } 366f7917c00SJeff Kirsher return ret; 367f7917c00SJeff Kirsher } 368f7917c00SJeff Kirsher 369f7917c00SJeff Kirsher static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data) 370f7917c00SJeff Kirsher { 371f7917c00SJeff Kirsher struct adapter *adapter = tdev2adap(tdev); 372f7917c00SJeff Kirsher struct tid_range *tid; 373f7917c00SJeff Kirsher struct mtutab *mtup; 374f7917c00SJeff Kirsher struct iff_mac *iffmacp; 375f7917c00SJeff Kirsher struct ddp_params *ddpp; 376f7917c00SJeff Kirsher struct adap_ports *ports; 377f7917c00SJeff Kirsher struct ofld_page_info *rx_page_info; 378f7917c00SJeff Kirsher struct tp_params *tp = &adapter->params.tp; 379f7917c00SJeff Kirsher int i; 380f7917c00SJeff Kirsher 381f7917c00SJeff 
Kirsher switch (req) { 382f7917c00SJeff Kirsher case GET_MAX_OUTSTANDING_WR: 383f7917c00SJeff Kirsher *(unsigned int *)data = FW_WR_NUM; 384f7917c00SJeff Kirsher break; 385f7917c00SJeff Kirsher case GET_WR_LEN: 386f7917c00SJeff Kirsher *(unsigned int *)data = WR_FLITS; 387f7917c00SJeff Kirsher break; 388f7917c00SJeff Kirsher case GET_TX_MAX_CHUNK: 389f7917c00SJeff Kirsher *(unsigned int *)data = 1 << 20; /* 1MB */ 390f7917c00SJeff Kirsher break; 391f7917c00SJeff Kirsher case GET_TID_RANGE: 392f7917c00SJeff Kirsher tid = data; 393f7917c00SJeff Kirsher tid->num = t3_mc5_size(&adapter->mc5) - 394f7917c00SJeff Kirsher adapter->params.mc5.nroutes - 395f7917c00SJeff Kirsher adapter->params.mc5.nfilters - adapter->params.mc5.nservers; 396f7917c00SJeff Kirsher tid->base = 0; 397f7917c00SJeff Kirsher break; 398f7917c00SJeff Kirsher case GET_STID_RANGE: 399f7917c00SJeff Kirsher tid = data; 400f7917c00SJeff Kirsher tid->num = adapter->params.mc5.nservers; 401f7917c00SJeff Kirsher tid->base = t3_mc5_size(&adapter->mc5) - tid->num - 402f7917c00SJeff Kirsher adapter->params.mc5.nfilters - adapter->params.mc5.nroutes; 403f7917c00SJeff Kirsher break; 404f7917c00SJeff Kirsher case GET_L2T_CAPACITY: 405f7917c00SJeff Kirsher *(unsigned int *)data = 2048; 406f7917c00SJeff Kirsher break; 407f7917c00SJeff Kirsher case GET_MTUS: 408f7917c00SJeff Kirsher mtup = data; 409f7917c00SJeff Kirsher mtup->size = NMTUS; 410f7917c00SJeff Kirsher mtup->mtus = adapter->params.mtus; 411f7917c00SJeff Kirsher break; 412f7917c00SJeff Kirsher case GET_IFF_FROM_MAC: 413f7917c00SJeff Kirsher iffmacp = data; 414f7917c00SJeff Kirsher iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr, 415f7917c00SJeff Kirsher iffmacp->vlan_tag & 416f7917c00SJeff Kirsher VLAN_VID_MASK); 417f7917c00SJeff Kirsher break; 418f7917c00SJeff Kirsher case GET_DDP_PARAMS: 419f7917c00SJeff Kirsher ddpp = data; 420f7917c00SJeff Kirsher ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT); 421f7917c00SJeff Kirsher ddpp->ulimit 
= t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT); 422f7917c00SJeff Kirsher ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK); 423f7917c00SJeff Kirsher break; 424f7917c00SJeff Kirsher case GET_PORTS: 425f7917c00SJeff Kirsher ports = data; 426f7917c00SJeff Kirsher ports->nports = adapter->params.nports; 427f7917c00SJeff Kirsher for_each_port(adapter, i) 428f7917c00SJeff Kirsher ports->lldevs[i] = adapter->port[i]; 429f7917c00SJeff Kirsher break; 430f7917c00SJeff Kirsher case ULP_ISCSI_GET_PARAMS: 431f7917c00SJeff Kirsher case ULP_ISCSI_SET_PARAMS: 432f7917c00SJeff Kirsher if (!offload_running(adapter)) 433f7917c00SJeff Kirsher return -EAGAIN; 434f7917c00SJeff Kirsher return cxgb_ulp_iscsi_ctl(adapter, req, data); 435f7917c00SJeff Kirsher case RDMA_GET_PARAMS: 436f7917c00SJeff Kirsher case RDMA_CQ_OP: 437f7917c00SJeff Kirsher case RDMA_CQ_SETUP: 438f7917c00SJeff Kirsher case RDMA_CQ_DISABLE: 439f7917c00SJeff Kirsher case RDMA_CTRL_QP_SETUP: 440f7917c00SJeff Kirsher case RDMA_GET_MEM: 441f7917c00SJeff Kirsher case RDMA_GET_MIB: 442f7917c00SJeff Kirsher if (!offload_running(adapter)) 443f7917c00SJeff Kirsher return -EAGAIN; 444f7917c00SJeff Kirsher return cxgb_rdma_ctl(adapter, req, data); 445f7917c00SJeff Kirsher case GET_RX_PAGE_INFO: 446f7917c00SJeff Kirsher rx_page_info = data; 447f7917c00SJeff Kirsher rx_page_info->page_size = tp->rx_pg_size; 448f7917c00SJeff Kirsher rx_page_info->num = tp->rx_num_pgs; 449f7917c00SJeff Kirsher break; 450f7917c00SJeff Kirsher case GET_ISCSI_IPV4ADDR: { 451f7917c00SJeff Kirsher struct iscsi_ipv4addr *p = data; 452f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(p->dev); 453f7917c00SJeff Kirsher p->ipv4addr = pi->iscsi_ipv4addr; 454f7917c00SJeff Kirsher break; 455f7917c00SJeff Kirsher } 456f7917c00SJeff Kirsher case GET_EMBEDDED_INFO: { 457f7917c00SJeff Kirsher struct ch_embedded_info *e = data; 458f7917c00SJeff Kirsher 459f7917c00SJeff Kirsher spin_lock(&adapter->stats_lock); 460f7917c00SJeff Kirsher 
t3_get_fw_version(adapter, &e->fw_vers); 461f7917c00SJeff Kirsher t3_get_tp_version(adapter, &e->tp_vers); 462f7917c00SJeff Kirsher spin_unlock(&adapter->stats_lock); 463f7917c00SJeff Kirsher break; 464f7917c00SJeff Kirsher } 465f7917c00SJeff Kirsher default: 466f7917c00SJeff Kirsher return -EOPNOTSUPP; 467f7917c00SJeff Kirsher } 468f7917c00SJeff Kirsher return 0; 469f7917c00SJeff Kirsher } 470f7917c00SJeff Kirsher 471f7917c00SJeff Kirsher /* 472f7917c00SJeff Kirsher * Dummy handler for Rx offload packets in case we get an offload packet before 473f7917c00SJeff Kirsher * proper processing is setup. This complains and drops the packet as it isn't 474f7917c00SJeff Kirsher * normal to get offload packets at this stage. 475f7917c00SJeff Kirsher */ 476f7917c00SJeff Kirsher static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs, 477f7917c00SJeff Kirsher int n) 478f7917c00SJeff Kirsher { 479f7917c00SJeff Kirsher while (n--) 480f7917c00SJeff Kirsher dev_kfree_skb_any(skbs[n]); 481f7917c00SJeff Kirsher return 0; 482f7917c00SJeff Kirsher } 483f7917c00SJeff Kirsher 484f7917c00SJeff Kirsher static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh) 485f7917c00SJeff Kirsher { 486f7917c00SJeff Kirsher } 487f7917c00SJeff Kirsher 488f7917c00SJeff Kirsher void cxgb3_set_dummy_ops(struct t3cdev *dev) 489f7917c00SJeff Kirsher { 490f7917c00SJeff Kirsher dev->recv = rx_offload_blackhole; 491f7917c00SJeff Kirsher dev->neigh_update = dummy_neigh_update; 492f7917c00SJeff Kirsher } 493f7917c00SJeff Kirsher 494f7917c00SJeff Kirsher /* 495f7917c00SJeff Kirsher * Free an active-open TID. 
496f7917c00SJeff Kirsher */ 497f7917c00SJeff Kirsher void *cxgb3_free_atid(struct t3cdev *tdev, int atid) 498f7917c00SJeff Kirsher { 499f7917c00SJeff Kirsher struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; 500f7917c00SJeff Kirsher union active_open_entry *p = atid2entry(t, atid); 501f7917c00SJeff Kirsher void *ctx = p->t3c_tid.ctx; 502f7917c00SJeff Kirsher 503f7917c00SJeff Kirsher spin_lock_bh(&t->atid_lock); 504f7917c00SJeff Kirsher p->next = t->afree; 505f7917c00SJeff Kirsher t->afree = p; 506f7917c00SJeff Kirsher t->atids_in_use--; 507f7917c00SJeff Kirsher spin_unlock_bh(&t->atid_lock); 508f7917c00SJeff Kirsher 509f7917c00SJeff Kirsher return ctx; 510f7917c00SJeff Kirsher } 511f7917c00SJeff Kirsher 512f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb3_free_atid); 513f7917c00SJeff Kirsher 514f7917c00SJeff Kirsher /* 515f7917c00SJeff Kirsher * Free a server TID and return it to the free pool. 516f7917c00SJeff Kirsher */ 517f7917c00SJeff Kirsher void cxgb3_free_stid(struct t3cdev *tdev, int stid) 518f7917c00SJeff Kirsher { 519f7917c00SJeff Kirsher struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; 520f7917c00SJeff Kirsher union listen_entry *p = stid2entry(t, stid); 521f7917c00SJeff Kirsher 522f7917c00SJeff Kirsher spin_lock_bh(&t->stid_lock); 523f7917c00SJeff Kirsher p->next = t->sfree; 524f7917c00SJeff Kirsher t->sfree = p; 525f7917c00SJeff Kirsher t->stids_in_use--; 526f7917c00SJeff Kirsher spin_unlock_bh(&t->stid_lock); 527f7917c00SJeff Kirsher } 528f7917c00SJeff Kirsher 529f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb3_free_stid); 530f7917c00SJeff Kirsher 531f7917c00SJeff Kirsher void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client, 532f7917c00SJeff Kirsher void *ctx, unsigned int tid) 533f7917c00SJeff Kirsher { 534f7917c00SJeff Kirsher struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; 535f7917c00SJeff Kirsher 536f7917c00SJeff Kirsher t->tid_tab[tid].client = client; 537f7917c00SJeff Kirsher t->tid_tab[tid].ctx = ctx; 538f7917c00SJeff Kirsher 
atomic_inc(&t->tids_in_use); 539f7917c00SJeff Kirsher } 540f7917c00SJeff Kirsher 541f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb3_insert_tid); 542f7917c00SJeff Kirsher 543f7917c00SJeff Kirsher /* 544f7917c00SJeff Kirsher * Populate a TID_RELEASE WR. The skb must be already propely sized. 545f7917c00SJeff Kirsher */ 546f7917c00SJeff Kirsher static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid) 547f7917c00SJeff Kirsher { 548f7917c00SJeff Kirsher struct cpl_tid_release *req; 549f7917c00SJeff Kirsher 550f7917c00SJeff Kirsher skb->priority = CPL_PRIORITY_SETUP; 551f7917c00SJeff Kirsher req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req)); 552f7917c00SJeff Kirsher req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 553f7917c00SJeff Kirsher OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); 554f7917c00SJeff Kirsher } 555f7917c00SJeff Kirsher 556f7917c00SJeff Kirsher static void t3_process_tid_release_list(struct work_struct *work) 557f7917c00SJeff Kirsher { 558f7917c00SJeff Kirsher struct t3c_data *td = container_of(work, struct t3c_data, 559f7917c00SJeff Kirsher tid_release_task); 560f7917c00SJeff Kirsher struct sk_buff *skb; 561f7917c00SJeff Kirsher struct t3cdev *tdev = td->dev; 562f7917c00SJeff Kirsher 563f7917c00SJeff Kirsher 564f7917c00SJeff Kirsher spin_lock_bh(&td->tid_release_lock); 565f7917c00SJeff Kirsher while (td->tid_release_list) { 566f7917c00SJeff Kirsher struct t3c_tid_entry *p = td->tid_release_list; 567f7917c00SJeff Kirsher 568f7917c00SJeff Kirsher td->tid_release_list = p->ctx; 569f7917c00SJeff Kirsher spin_unlock_bh(&td->tid_release_lock); 570f7917c00SJeff Kirsher 571f7917c00SJeff Kirsher skb = alloc_skb(sizeof(struct cpl_tid_release), 572f7917c00SJeff Kirsher GFP_KERNEL); 573f7917c00SJeff Kirsher if (!skb) 574f7917c00SJeff Kirsher skb = td->nofail_skb; 575f7917c00SJeff Kirsher if (!skb) { 576f7917c00SJeff Kirsher spin_lock_bh(&td->tid_release_lock); 577f7917c00SJeff Kirsher p->ctx = (void *)td->tid_release_list; 
578f7917c00SJeff Kirsher td->tid_release_list = (struct t3c_tid_entry *)p; 579f7917c00SJeff Kirsher break; 580f7917c00SJeff Kirsher } 581f7917c00SJeff Kirsher mk_tid_release(skb, p - td->tid_maps.tid_tab); 582f7917c00SJeff Kirsher cxgb3_ofld_send(tdev, skb); 583f7917c00SJeff Kirsher p->ctx = NULL; 584f7917c00SJeff Kirsher if (skb == td->nofail_skb) 585f7917c00SJeff Kirsher td->nofail_skb = 586f7917c00SJeff Kirsher alloc_skb(sizeof(struct cpl_tid_release), 587f7917c00SJeff Kirsher GFP_KERNEL); 588f7917c00SJeff Kirsher spin_lock_bh(&td->tid_release_lock); 589f7917c00SJeff Kirsher } 590f7917c00SJeff Kirsher td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1; 591f7917c00SJeff Kirsher spin_unlock_bh(&td->tid_release_lock); 592f7917c00SJeff Kirsher 593f7917c00SJeff Kirsher if (!td->nofail_skb) 594f7917c00SJeff Kirsher td->nofail_skb = 595f7917c00SJeff Kirsher alloc_skb(sizeof(struct cpl_tid_release), 596f7917c00SJeff Kirsher GFP_KERNEL); 597f7917c00SJeff Kirsher } 598f7917c00SJeff Kirsher 599f7917c00SJeff Kirsher /* use ctx as a next pointer in the tid release list */ 600f7917c00SJeff Kirsher void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid) 601f7917c00SJeff Kirsher { 602f7917c00SJeff Kirsher struct t3c_data *td = T3C_DATA(tdev); 603f7917c00SJeff Kirsher struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid]; 604f7917c00SJeff Kirsher 605f7917c00SJeff Kirsher spin_lock_bh(&td->tid_release_lock); 606f7917c00SJeff Kirsher p->ctx = (void *)td->tid_release_list; 607f7917c00SJeff Kirsher p->client = NULL; 608f7917c00SJeff Kirsher td->tid_release_list = p; 609f7917c00SJeff Kirsher if (!p->ctx || td->release_list_incomplete) 610f7917c00SJeff Kirsher schedule_work(&td->tid_release_task); 611f7917c00SJeff Kirsher spin_unlock_bh(&td->tid_release_lock); 612f7917c00SJeff Kirsher } 613f7917c00SJeff Kirsher 614f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb3_queue_tid_release); 615f7917c00SJeff Kirsher 616f7917c00SJeff Kirsher /* 617f7917c00SJeff Kirsher * 
Remove a tid from the TID table. A client may defer processing its last 618f7917c00SJeff Kirsher * CPL message if it is locked at the time it arrives, and while the message 619f7917c00SJeff Kirsher * sits in the client's backlog the TID may be reused for another connection. 620f7917c00SJeff Kirsher * To handle this we atomically switch the TID association if it still points 621f7917c00SJeff Kirsher * to the original client context. 622f7917c00SJeff Kirsher */ 623f7917c00SJeff Kirsher void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid) 624f7917c00SJeff Kirsher { 625f7917c00SJeff Kirsher struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; 626f7917c00SJeff Kirsher 627f7917c00SJeff Kirsher BUG_ON(tid >= t->ntids); 628f7917c00SJeff Kirsher if (tdev->type == T3A) 629f7917c00SJeff Kirsher (void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL); 630f7917c00SJeff Kirsher else { 631f7917c00SJeff Kirsher struct sk_buff *skb; 632f7917c00SJeff Kirsher 633f7917c00SJeff Kirsher skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC); 634f7917c00SJeff Kirsher if (likely(skb)) { 635f7917c00SJeff Kirsher mk_tid_release(skb, tid); 636f7917c00SJeff Kirsher cxgb3_ofld_send(tdev, skb); 637f7917c00SJeff Kirsher t->tid_tab[tid].ctx = NULL; 638f7917c00SJeff Kirsher } else 639f7917c00SJeff Kirsher cxgb3_queue_tid_release(tdev, tid); 640f7917c00SJeff Kirsher } 641f7917c00SJeff Kirsher atomic_dec(&t->tids_in_use); 642f7917c00SJeff Kirsher } 643f7917c00SJeff Kirsher 644f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb3_remove_tid); 645f7917c00SJeff Kirsher 646f7917c00SJeff Kirsher int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client, 647f7917c00SJeff Kirsher void *ctx) 648f7917c00SJeff Kirsher { 649f7917c00SJeff Kirsher int atid = -1; 650f7917c00SJeff Kirsher struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; 651f7917c00SJeff Kirsher 652f7917c00SJeff Kirsher spin_lock_bh(&t->atid_lock); 653f7917c00SJeff Kirsher if (t->afree && 654f7917c00SJeff Kirsher t->atids_in_use + 
atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <= 655f7917c00SJeff Kirsher t->ntids) { 656f7917c00SJeff Kirsher union active_open_entry *p = t->afree; 657f7917c00SJeff Kirsher 658f7917c00SJeff Kirsher atid = (p - t->atid_tab) + t->atid_base; 659f7917c00SJeff Kirsher t->afree = p->next; 660f7917c00SJeff Kirsher p->t3c_tid.ctx = ctx; 661f7917c00SJeff Kirsher p->t3c_tid.client = client; 662f7917c00SJeff Kirsher t->atids_in_use++; 663f7917c00SJeff Kirsher } 664f7917c00SJeff Kirsher spin_unlock_bh(&t->atid_lock); 665f7917c00SJeff Kirsher return atid; 666f7917c00SJeff Kirsher } 667f7917c00SJeff Kirsher 668f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb3_alloc_atid); 669f7917c00SJeff Kirsher 670f7917c00SJeff Kirsher int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client, 671f7917c00SJeff Kirsher void *ctx) 672f7917c00SJeff Kirsher { 673f7917c00SJeff Kirsher int stid = -1; 674f7917c00SJeff Kirsher struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; 675f7917c00SJeff Kirsher 676f7917c00SJeff Kirsher spin_lock_bh(&t->stid_lock); 677f7917c00SJeff Kirsher if (t->sfree) { 678f7917c00SJeff Kirsher union listen_entry *p = t->sfree; 679f7917c00SJeff Kirsher 680f7917c00SJeff Kirsher stid = (p - t->stid_tab) + t->stid_base; 681f7917c00SJeff Kirsher t->sfree = p->next; 682f7917c00SJeff Kirsher p->t3c_tid.ctx = ctx; 683f7917c00SJeff Kirsher p->t3c_tid.client = client; 684f7917c00SJeff Kirsher t->stids_in_use++; 685f7917c00SJeff Kirsher } 686f7917c00SJeff Kirsher spin_unlock_bh(&t->stid_lock); 687f7917c00SJeff Kirsher return stid; 688f7917c00SJeff Kirsher } 689f7917c00SJeff Kirsher 690f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb3_alloc_stid); 691f7917c00SJeff Kirsher 692f7917c00SJeff Kirsher /* Get the t3cdev associated with a net_device */ 693f7917c00SJeff Kirsher struct t3cdev *dev2t3cdev(struct net_device *dev) 694f7917c00SJeff Kirsher { 695f7917c00SJeff Kirsher const struct port_info *pi = netdev_priv(dev); 696f7917c00SJeff Kirsher 697f7917c00SJeff Kirsher return (struct t3cdev 
*)pi->adapter; 698f7917c00SJeff Kirsher } 699f7917c00SJeff Kirsher 700f7917c00SJeff Kirsher EXPORT_SYMBOL(dev2t3cdev); 701f7917c00SJeff Kirsher 702f7917c00SJeff Kirsher static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb) 703f7917c00SJeff Kirsher { 704f7917c00SJeff Kirsher struct cpl_smt_write_rpl *rpl = cplhdr(skb); 705f7917c00SJeff Kirsher 706f7917c00SJeff Kirsher if (rpl->status != CPL_ERR_NONE) 707f7917c00SJeff Kirsher printk(KERN_ERR 708f7917c00SJeff Kirsher "Unexpected SMT_WRITE_RPL status %u for entry %u\n", 709f7917c00SJeff Kirsher rpl->status, GET_TID(rpl)); 710f7917c00SJeff Kirsher 711f7917c00SJeff Kirsher return CPL_RET_BUF_DONE; 712f7917c00SJeff Kirsher } 713f7917c00SJeff Kirsher 714f7917c00SJeff Kirsher static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb) 715f7917c00SJeff Kirsher { 716f7917c00SJeff Kirsher struct cpl_l2t_write_rpl *rpl = cplhdr(skb); 717f7917c00SJeff Kirsher 718f7917c00SJeff Kirsher if (rpl->status != CPL_ERR_NONE) 719f7917c00SJeff Kirsher printk(KERN_ERR 720f7917c00SJeff Kirsher "Unexpected L2T_WRITE_RPL status %u for entry %u\n", 721f7917c00SJeff Kirsher rpl->status, GET_TID(rpl)); 722f7917c00SJeff Kirsher 723f7917c00SJeff Kirsher return CPL_RET_BUF_DONE; 724f7917c00SJeff Kirsher } 725f7917c00SJeff Kirsher 726f7917c00SJeff Kirsher static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb) 727f7917c00SJeff Kirsher { 728f7917c00SJeff Kirsher struct cpl_rte_write_rpl *rpl = cplhdr(skb); 729f7917c00SJeff Kirsher 730f7917c00SJeff Kirsher if (rpl->status != CPL_ERR_NONE) 731f7917c00SJeff Kirsher printk(KERN_ERR 732f7917c00SJeff Kirsher "Unexpected RTE_WRITE_RPL status %u for entry %u\n", 733f7917c00SJeff Kirsher rpl->status, GET_TID(rpl)); 734f7917c00SJeff Kirsher 735f7917c00SJeff Kirsher return CPL_RET_BUF_DONE; 736f7917c00SJeff Kirsher } 737f7917c00SJeff Kirsher 738f7917c00SJeff Kirsher static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb) 739f7917c00SJeff Kirsher { 740f7917c00SJeff 
Kirsher struct cpl_act_open_rpl *rpl = cplhdr(skb); 741f7917c00SJeff Kirsher unsigned int atid = G_TID(ntohl(rpl->atid)); 742f7917c00SJeff Kirsher struct t3c_tid_entry *t3c_tid; 743f7917c00SJeff Kirsher 744f7917c00SJeff Kirsher t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid); 745f7917c00SJeff Kirsher if (t3c_tid && t3c_tid->ctx && t3c_tid->client && 746f7917c00SJeff Kirsher t3c_tid->client->handlers && 747f7917c00SJeff Kirsher t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) { 748f7917c00SJeff Kirsher return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb, 749f7917c00SJeff Kirsher t3c_tid-> 750f7917c00SJeff Kirsher ctx); 751f7917c00SJeff Kirsher } else { 752f7917c00SJeff Kirsher printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", 753f7917c00SJeff Kirsher dev->name, CPL_ACT_OPEN_RPL); 754f7917c00SJeff Kirsher return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; 755f7917c00SJeff Kirsher } 756f7917c00SJeff Kirsher } 757f7917c00SJeff Kirsher 758f7917c00SJeff Kirsher static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb) 759f7917c00SJeff Kirsher { 760f7917c00SJeff Kirsher union opcode_tid *p = cplhdr(skb); 761f7917c00SJeff Kirsher unsigned int stid = G_TID(ntohl(p->opcode_tid)); 762f7917c00SJeff Kirsher struct t3c_tid_entry *t3c_tid; 763f7917c00SJeff Kirsher 764f7917c00SJeff Kirsher t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid); 765f7917c00SJeff Kirsher if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && 766f7917c00SJeff Kirsher t3c_tid->client->handlers[p->opcode]) { 767f7917c00SJeff Kirsher return t3c_tid->client->handlers[p->opcode] (dev, skb, 768f7917c00SJeff Kirsher t3c_tid->ctx); 769f7917c00SJeff Kirsher } else { 770f7917c00SJeff Kirsher printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", 771f7917c00SJeff Kirsher dev->name, p->opcode); 772f7917c00SJeff Kirsher return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; 773f7917c00SJeff Kirsher } 774f7917c00SJeff Kirsher } 775f7917c00SJeff Kirsher 776f7917c00SJeff Kirsher static int 
do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb) 777f7917c00SJeff Kirsher { 778f7917c00SJeff Kirsher union opcode_tid *p = cplhdr(skb); 779f7917c00SJeff Kirsher unsigned int hwtid = G_TID(ntohl(p->opcode_tid)); 780f7917c00SJeff Kirsher struct t3c_tid_entry *t3c_tid; 781f7917c00SJeff Kirsher 782f7917c00SJeff Kirsher t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); 783f7917c00SJeff Kirsher if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && 784f7917c00SJeff Kirsher t3c_tid->client->handlers[p->opcode]) { 785f7917c00SJeff Kirsher return t3c_tid->client->handlers[p->opcode] 786f7917c00SJeff Kirsher (dev, skb, t3c_tid->ctx); 787f7917c00SJeff Kirsher } else { 788f7917c00SJeff Kirsher printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", 789f7917c00SJeff Kirsher dev->name, p->opcode); 790f7917c00SJeff Kirsher return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; 791f7917c00SJeff Kirsher } 792f7917c00SJeff Kirsher } 793f7917c00SJeff Kirsher 794f7917c00SJeff Kirsher static int do_cr(struct t3cdev *dev, struct sk_buff *skb) 795f7917c00SJeff Kirsher { 796f7917c00SJeff Kirsher struct cpl_pass_accept_req *req = cplhdr(skb); 797f7917c00SJeff Kirsher unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid)); 798f7917c00SJeff Kirsher struct tid_info *t = &(T3C_DATA(dev))->tid_maps; 799f7917c00SJeff Kirsher struct t3c_tid_entry *t3c_tid; 800f7917c00SJeff Kirsher unsigned int tid = GET_TID(req); 801f7917c00SJeff Kirsher 802f7917c00SJeff Kirsher if (unlikely(tid >= t->ntids)) { 803f7917c00SJeff Kirsher printk("%s: passive open TID %u too large\n", 804f7917c00SJeff Kirsher dev->name, tid); 805f7917c00SJeff Kirsher t3_fatal_err(tdev2adap(dev)); 806f7917c00SJeff Kirsher return CPL_RET_BUF_DONE; 807f7917c00SJeff Kirsher } 808f7917c00SJeff Kirsher 809f7917c00SJeff Kirsher t3c_tid = lookup_stid(t, stid); 810f7917c00SJeff Kirsher if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && 811f7917c00SJeff Kirsher t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) { 
812f7917c00SJeff Kirsher return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ] 813f7917c00SJeff Kirsher (dev, skb, t3c_tid->ctx); 814f7917c00SJeff Kirsher } else { 815f7917c00SJeff Kirsher printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", 816f7917c00SJeff Kirsher dev->name, CPL_PASS_ACCEPT_REQ); 817f7917c00SJeff Kirsher return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; 818f7917c00SJeff Kirsher } 819f7917c00SJeff Kirsher } 820f7917c00SJeff Kirsher 821f7917c00SJeff Kirsher /* 822f7917c00SJeff Kirsher * Returns an sk_buff for a reply CPL message of size len. If the input 823f7917c00SJeff Kirsher * sk_buff has no other users it is trimmed and reused, otherwise a new buffer 824f7917c00SJeff Kirsher * is allocated. The input skb must be of size at least len. Note that this 825f7917c00SJeff Kirsher * operation does not destroy the original skb data even if it decides to reuse 826f7917c00SJeff Kirsher * the buffer. 827f7917c00SJeff Kirsher */ 828f7917c00SJeff Kirsher static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len, 829f7917c00SJeff Kirsher gfp_t gfp) 830f7917c00SJeff Kirsher { 831f7917c00SJeff Kirsher if (likely(!skb_cloned(skb))) { 832f7917c00SJeff Kirsher BUG_ON(skb->len < len); 833f7917c00SJeff Kirsher __skb_trim(skb, len); 834f7917c00SJeff Kirsher skb_get(skb); 835f7917c00SJeff Kirsher } else { 836f7917c00SJeff Kirsher skb = alloc_skb(len, gfp); 837f7917c00SJeff Kirsher if (skb) 838f7917c00SJeff Kirsher __skb_put(skb, len); 839f7917c00SJeff Kirsher } 840f7917c00SJeff Kirsher return skb; 841f7917c00SJeff Kirsher } 842f7917c00SJeff Kirsher 843f7917c00SJeff Kirsher static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb) 844f7917c00SJeff Kirsher { 845f7917c00SJeff Kirsher union opcode_tid *p = cplhdr(skb); 846f7917c00SJeff Kirsher unsigned int hwtid = G_TID(ntohl(p->opcode_tid)); 847f7917c00SJeff Kirsher struct t3c_tid_entry *t3c_tid; 848f7917c00SJeff Kirsher 849f7917c00SJeff Kirsher t3c_tid = 
lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); 850f7917c00SJeff Kirsher if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && 851f7917c00SJeff Kirsher t3c_tid->client->handlers[p->opcode]) { 852f7917c00SJeff Kirsher return t3c_tid->client->handlers[p->opcode] 853f7917c00SJeff Kirsher (dev, skb, t3c_tid->ctx); 854f7917c00SJeff Kirsher } else { 855f7917c00SJeff Kirsher struct cpl_abort_req_rss *req = cplhdr(skb); 856f7917c00SJeff Kirsher struct cpl_abort_rpl *rpl; 857f7917c00SJeff Kirsher struct sk_buff *reply_skb; 858f7917c00SJeff Kirsher unsigned int tid = GET_TID(req); 859f7917c00SJeff Kirsher u8 cmd = req->status; 860f7917c00SJeff Kirsher 861f7917c00SJeff Kirsher if (req->status == CPL_ERR_RTX_NEG_ADVICE || 862f7917c00SJeff Kirsher req->status == CPL_ERR_PERSIST_NEG_ADVICE) 863f7917c00SJeff Kirsher goto out; 864f7917c00SJeff Kirsher 865f7917c00SJeff Kirsher reply_skb = cxgb3_get_cpl_reply_skb(skb, 866f7917c00SJeff Kirsher sizeof(struct 867f7917c00SJeff Kirsher cpl_abort_rpl), 868f7917c00SJeff Kirsher GFP_ATOMIC); 869f7917c00SJeff Kirsher 870f7917c00SJeff Kirsher if (!reply_skb) { 871f7917c00SJeff Kirsher printk("do_abort_req_rss: couldn't get skb!\n"); 872f7917c00SJeff Kirsher goto out; 873f7917c00SJeff Kirsher } 874f7917c00SJeff Kirsher reply_skb->priority = CPL_PRIORITY_DATA; 875f7917c00SJeff Kirsher __skb_put(reply_skb, sizeof(struct cpl_abort_rpl)); 876f7917c00SJeff Kirsher rpl = cplhdr(reply_skb); 877f7917c00SJeff Kirsher rpl->wr.wr_hi = 878f7917c00SJeff Kirsher htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); 879f7917c00SJeff Kirsher rpl->wr.wr_lo = htonl(V_WR_TID(tid)); 880f7917c00SJeff Kirsher OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid)); 881f7917c00SJeff Kirsher rpl->cmd = cmd; 882f7917c00SJeff Kirsher cxgb3_ofld_send(dev, reply_skb); 883f7917c00SJeff Kirsher out: 884f7917c00SJeff Kirsher return CPL_RET_BUF_DONE; 885f7917c00SJeff Kirsher } 886f7917c00SJeff Kirsher } 887f7917c00SJeff Kirsher 888f7917c00SJeff Kirsher static int 
do_act_establish(struct t3cdev *dev, struct sk_buff *skb) 889f7917c00SJeff Kirsher { 890f7917c00SJeff Kirsher struct cpl_act_establish *req = cplhdr(skb); 891f7917c00SJeff Kirsher unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid)); 892f7917c00SJeff Kirsher struct tid_info *t = &(T3C_DATA(dev))->tid_maps; 893f7917c00SJeff Kirsher struct t3c_tid_entry *t3c_tid; 894f7917c00SJeff Kirsher unsigned int tid = GET_TID(req); 895f7917c00SJeff Kirsher 896f7917c00SJeff Kirsher if (unlikely(tid >= t->ntids)) { 897f7917c00SJeff Kirsher printk("%s: active establish TID %u too large\n", 898f7917c00SJeff Kirsher dev->name, tid); 899f7917c00SJeff Kirsher t3_fatal_err(tdev2adap(dev)); 900f7917c00SJeff Kirsher return CPL_RET_BUF_DONE; 901f7917c00SJeff Kirsher } 902f7917c00SJeff Kirsher 903f7917c00SJeff Kirsher t3c_tid = lookup_atid(t, atid); 904f7917c00SJeff Kirsher if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && 905f7917c00SJeff Kirsher t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) { 906f7917c00SJeff Kirsher return t3c_tid->client->handlers[CPL_ACT_ESTABLISH] 907f7917c00SJeff Kirsher (dev, skb, t3c_tid->ctx); 908f7917c00SJeff Kirsher } else { 909f7917c00SJeff Kirsher printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", 910f7917c00SJeff Kirsher dev->name, CPL_ACT_ESTABLISH); 911f7917c00SJeff Kirsher return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; 912f7917c00SJeff Kirsher } 913f7917c00SJeff Kirsher } 914f7917c00SJeff Kirsher 915f7917c00SJeff Kirsher static int do_trace(struct t3cdev *dev, struct sk_buff *skb) 916f7917c00SJeff Kirsher { 917f7917c00SJeff Kirsher struct cpl_trace_pkt *p = cplhdr(skb); 918f7917c00SJeff Kirsher 919f7917c00SJeff Kirsher skb->protocol = htons(0xffff); 920f7917c00SJeff Kirsher skb->dev = dev->lldev; 921f7917c00SJeff Kirsher skb_pull(skb, sizeof(*p)); 922f7917c00SJeff Kirsher skb_reset_mac_header(skb); 923f7917c00SJeff Kirsher netif_receive_skb(skb); 924f7917c00SJeff Kirsher return 0; 925f7917c00SJeff Kirsher } 926f7917c00SJeff Kirsher 
927f7917c00SJeff Kirsher /* 928f7917c00SJeff Kirsher * That skb would better have come from process_responses() where we abuse 929f7917c00SJeff Kirsher * ->priority and ->csum to carry our data. NB: if we get to per-arch 930f7917c00SJeff Kirsher * ->csum, the things might get really interesting here. 931f7917c00SJeff Kirsher */ 932f7917c00SJeff Kirsher 933f7917c00SJeff Kirsher static inline u32 get_hwtid(struct sk_buff *skb) 934f7917c00SJeff Kirsher { 935f7917c00SJeff Kirsher return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff; 936f7917c00SJeff Kirsher } 937f7917c00SJeff Kirsher 938f7917c00SJeff Kirsher static inline u32 get_opcode(struct sk_buff *skb) 939f7917c00SJeff Kirsher { 940f7917c00SJeff Kirsher return G_OPCODE(ntohl((__force __be32)skb->csum)); 941f7917c00SJeff Kirsher } 942f7917c00SJeff Kirsher 943f7917c00SJeff Kirsher static int do_term(struct t3cdev *dev, struct sk_buff *skb) 944f7917c00SJeff Kirsher { 945f7917c00SJeff Kirsher unsigned int hwtid = get_hwtid(skb); 946f7917c00SJeff Kirsher unsigned int opcode = get_opcode(skb); 947f7917c00SJeff Kirsher struct t3c_tid_entry *t3c_tid; 948f7917c00SJeff Kirsher 949f7917c00SJeff Kirsher t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); 950f7917c00SJeff Kirsher if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && 951f7917c00SJeff Kirsher t3c_tid->client->handlers[opcode]) { 952f7917c00SJeff Kirsher return t3c_tid->client->handlers[opcode] (dev, skb, 953f7917c00SJeff Kirsher t3c_tid->ctx); 954f7917c00SJeff Kirsher } else { 955f7917c00SJeff Kirsher printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", 956f7917c00SJeff Kirsher dev->name, opcode); 957f7917c00SJeff Kirsher return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; 958f7917c00SJeff Kirsher } 959f7917c00SJeff Kirsher } 960f7917c00SJeff Kirsher 961f7917c00SJeff Kirsher static int nb_callback(struct notifier_block *self, unsigned long event, 962f7917c00SJeff Kirsher void *ctx) 963f7917c00SJeff Kirsher { 964f7917c00SJeff Kirsher switch 
(event) { 965f7917c00SJeff Kirsher case (NETEVENT_NEIGH_UPDATE):{ 966f7917c00SJeff Kirsher cxgb_neigh_update((struct neighbour *)ctx); 967f7917c00SJeff Kirsher break; 968f7917c00SJeff Kirsher } 969f7917c00SJeff Kirsher case (NETEVENT_REDIRECT):{ 970f7917c00SJeff Kirsher struct netevent_redirect *nr = ctx; 971f7917c00SJeff Kirsher cxgb_redirect(nr->old, nr->new); 97227217455SDavid Miller cxgb_neigh_update(dst_get_neighbour_noref(nr->new)); 973f7917c00SJeff Kirsher break; 974f7917c00SJeff Kirsher } 975f7917c00SJeff Kirsher default: 976f7917c00SJeff Kirsher break; 977f7917c00SJeff Kirsher } 978f7917c00SJeff Kirsher return 0; 979f7917c00SJeff Kirsher } 980f7917c00SJeff Kirsher 981f7917c00SJeff Kirsher static struct notifier_block nb = { 982f7917c00SJeff Kirsher .notifier_call = nb_callback 983f7917c00SJeff Kirsher }; 984f7917c00SJeff Kirsher 985f7917c00SJeff Kirsher /* 986f7917c00SJeff Kirsher * Process a received packet with an unknown/unexpected CPL opcode. 987f7917c00SJeff Kirsher */ 988f7917c00SJeff Kirsher static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb) 989f7917c00SJeff Kirsher { 990f7917c00SJeff Kirsher printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name, 991f7917c00SJeff Kirsher *skb->data); 992f7917c00SJeff Kirsher return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; 993f7917c00SJeff Kirsher } 994f7917c00SJeff Kirsher 995f7917c00SJeff Kirsher /* 996f7917c00SJeff Kirsher * Handlers for each CPL opcode 997f7917c00SJeff Kirsher */ 998f7917c00SJeff Kirsher static cpl_handler_func cpl_handlers[NUM_CPL_CMDS]; 999f7917c00SJeff Kirsher 1000f7917c00SJeff Kirsher /* 1001f7917c00SJeff Kirsher * Add a new handler to the CPL dispatch table. A NULL handler may be supplied 1002f7917c00SJeff Kirsher * to unregister an existing handler. 
1003f7917c00SJeff Kirsher */ 1004f7917c00SJeff Kirsher void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h) 1005f7917c00SJeff Kirsher { 1006f7917c00SJeff Kirsher if (opcode < NUM_CPL_CMDS) 1007f7917c00SJeff Kirsher cpl_handlers[opcode] = h ? h : do_bad_cpl; 1008f7917c00SJeff Kirsher else 1009f7917c00SJeff Kirsher printk(KERN_ERR "T3C: handler registration for " 1010f7917c00SJeff Kirsher "opcode %x failed\n", opcode); 1011f7917c00SJeff Kirsher } 1012f7917c00SJeff Kirsher 1013f7917c00SJeff Kirsher EXPORT_SYMBOL(t3_register_cpl_handler); 1014f7917c00SJeff Kirsher 1015f7917c00SJeff Kirsher /* 1016f7917c00SJeff Kirsher * T3CDEV's receive method. 1017f7917c00SJeff Kirsher */ 1018f7917c00SJeff Kirsher static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n) 1019f7917c00SJeff Kirsher { 1020f7917c00SJeff Kirsher while (n--) { 1021f7917c00SJeff Kirsher struct sk_buff *skb = *skbs++; 1022f7917c00SJeff Kirsher unsigned int opcode = get_opcode(skb); 1023f7917c00SJeff Kirsher int ret = cpl_handlers[opcode] (dev, skb); 1024f7917c00SJeff Kirsher 1025f7917c00SJeff Kirsher #if VALIDATE_TID 1026f7917c00SJeff Kirsher if (ret & CPL_RET_UNKNOWN_TID) { 1027f7917c00SJeff Kirsher union opcode_tid *p = cplhdr(skb); 1028f7917c00SJeff Kirsher 1029f7917c00SJeff Kirsher printk(KERN_ERR "%s: CPL message (opcode %u) had " 1030f7917c00SJeff Kirsher "unknown TID %u\n", dev->name, opcode, 1031f7917c00SJeff Kirsher G_TID(ntohl(p->opcode_tid))); 1032f7917c00SJeff Kirsher } 1033f7917c00SJeff Kirsher #endif 1034f7917c00SJeff Kirsher if (ret & CPL_RET_BUF_DONE) 1035f7917c00SJeff Kirsher kfree_skb(skb); 1036f7917c00SJeff Kirsher } 1037f7917c00SJeff Kirsher return 0; 1038f7917c00SJeff Kirsher } 1039f7917c00SJeff Kirsher 1040f7917c00SJeff Kirsher /* 1041f7917c00SJeff Kirsher * Sends an sk_buff to a T3C driver after dealing with any active network taps. 
1042f7917c00SJeff Kirsher */ 1043f7917c00SJeff Kirsher int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb) 1044f7917c00SJeff Kirsher { 1045f7917c00SJeff Kirsher int r; 1046f7917c00SJeff Kirsher 1047f7917c00SJeff Kirsher local_bh_disable(); 1048f7917c00SJeff Kirsher r = dev->send(dev, skb); 1049f7917c00SJeff Kirsher local_bh_enable(); 1050f7917c00SJeff Kirsher return r; 1051f7917c00SJeff Kirsher } 1052f7917c00SJeff Kirsher 1053f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb3_ofld_send); 1054f7917c00SJeff Kirsher 1055f7917c00SJeff Kirsher static int is_offloading(struct net_device *dev) 1056f7917c00SJeff Kirsher { 1057f7917c00SJeff Kirsher struct adapter *adapter; 1058f7917c00SJeff Kirsher int i; 1059f7917c00SJeff Kirsher 1060f7917c00SJeff Kirsher read_lock_bh(&adapter_list_lock); 1061f7917c00SJeff Kirsher list_for_each_entry(adapter, &adapter_list, adapter_list) { 1062f7917c00SJeff Kirsher for_each_port(adapter, i) { 1063f7917c00SJeff Kirsher if (dev == adapter->port[i]) { 1064f7917c00SJeff Kirsher read_unlock_bh(&adapter_list_lock); 1065f7917c00SJeff Kirsher return 1; 1066f7917c00SJeff Kirsher } 1067f7917c00SJeff Kirsher } 1068f7917c00SJeff Kirsher } 1069f7917c00SJeff Kirsher read_unlock_bh(&adapter_list_lock); 1070f7917c00SJeff Kirsher return 0; 1071f7917c00SJeff Kirsher } 1072f7917c00SJeff Kirsher 1073f7917c00SJeff Kirsher static void cxgb_neigh_update(struct neighbour *neigh) 1074f7917c00SJeff Kirsher { 1075f7917c00SJeff Kirsher struct net_device *dev = neigh->dev; 1076f7917c00SJeff Kirsher 1077f7917c00SJeff Kirsher if (dev && (is_offloading(dev))) { 1078f7917c00SJeff Kirsher struct t3cdev *tdev = dev2t3cdev(dev); 1079f7917c00SJeff Kirsher 1080f7917c00SJeff Kirsher BUG_ON(!tdev); 1081f7917c00SJeff Kirsher t3_l2t_update(tdev, neigh); 1082f7917c00SJeff Kirsher } 1083f7917c00SJeff Kirsher } 1084f7917c00SJeff Kirsher 1085f7917c00SJeff Kirsher static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e) 1086f7917c00SJeff Kirsher { 1087f7917c00SJeff 
/*
 * Build and send a CPL_SET_TCB_FIELD that repoints connection @tid at L2T
 * entry @e.  Best effort: on skb allocation failure the update is dropped
 * with only an error message.
 */
static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: cannot allocate skb!\n", __func__);
		return;
	}
	skb->priority = CPL_PRIORITY_CONTROL;
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = 0;		/* no completion message wanted */
	req->cpu_idx = 0;
	req->word = htons(W_TCB_L2T_IX);
	req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
	req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
	tdev->send(tdev, skb);
}

/*
 * React to a routing redirect from @old to @new: if both dst entries resolve
 * to ports of the same offload adapter, allocate an L2T entry for the new
 * neighbour, offer every offloaded connection's client the chance to migrate
 * (via its ->redirect hook), and rewrite the TCB L2T index for those that
 * accept.  Redirects to non-offload or different devices are ignored.
 */
static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
{
	struct net_device *olddev, *newdev;
	struct tid_info *ti;
	struct t3cdev *tdev;
	u32 tid;
	int update_tcb;
	struct l2t_entry *e;
	struct t3c_tid_entry *te;

	olddev = dst_get_neighbour_noref(old)->dev;
	newdev = dst_get_neighbour_noref(new)->dev;
	if (!is_offloading(olddev))
		return;
	if (!is_offloading(newdev)) {
		printk(KERN_WARNING "%s: Redirect to non-offload "
		       "device ignored.\n", __func__);
		return;
	}
	tdev = dev2t3cdev(olddev);
	BUG_ON(!tdev);
	if (tdev != dev2t3cdev(newdev)) {
		printk(KERN_WARNING "%s: Redirect to different "
		       "offload device ignored.\n", __func__);
		return;
	}

	/* Add new L2T entry */
	e = t3_l2t_get(tdev, dst_get_neighbour_noref(new), newdev);
	if (!e) {
		printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
		       __func__);
		return;
	}

	/* Walk tid table and notify clients of dst change. */
	ti = &(T3C_DATA(tdev))->tid_maps;
	for (tid = 0; tid < ti->ntids; tid++) {
		te = lookup_tid(ti, tid);
		BUG_ON(!te);
		if (te && te->ctx && te->client && te->client->redirect) {
			update_tcb = te->client->redirect(te->ctx, old, new, e);
			if (update_tcb) {
				/* take a ref per migrated connection; L2DATA
				 * is RCU-protected, hence the read lock */
				rcu_read_lock();
				l2t_hold(L2DATA(tdev), e);
				rcu_read_unlock();
				set_l2t_ix(tdev, tid, e);
			}
		}
	}
	/* drop the reference taken by t3_l2t_get() above */
	l2t_release(tdev, e);
}
1163f7917c00SJeff Kirsher */ 1164f7917c00SJeff Kirsher void *cxgb_alloc_mem(unsigned long size) 1165f7917c00SJeff Kirsher { 1166f7917c00SJeff Kirsher void *p = kzalloc(size, GFP_KERNEL); 1167f7917c00SJeff Kirsher 1168f7917c00SJeff Kirsher if (!p) 1169f7917c00SJeff Kirsher p = vzalloc(size); 1170f7917c00SJeff Kirsher return p; 1171f7917c00SJeff Kirsher } 1172f7917c00SJeff Kirsher 1173f7917c00SJeff Kirsher /* 1174f7917c00SJeff Kirsher * Free memory allocated through t3_alloc_mem(). 1175f7917c00SJeff Kirsher */ 1176f7917c00SJeff Kirsher void cxgb_free_mem(void *addr) 1177f7917c00SJeff Kirsher { 1178f7917c00SJeff Kirsher if (is_vmalloc_addr(addr)) 1179f7917c00SJeff Kirsher vfree(addr); 1180f7917c00SJeff Kirsher else 1181f7917c00SJeff Kirsher kfree(addr); 1182f7917c00SJeff Kirsher } 1183f7917c00SJeff Kirsher 1184f7917c00SJeff Kirsher /* 1185f7917c00SJeff Kirsher * Allocate and initialize the TID tables. Returns 0 on success. 1186f7917c00SJeff Kirsher */ 1187f7917c00SJeff Kirsher static int init_tid_tabs(struct tid_info *t, unsigned int ntids, 1188f7917c00SJeff Kirsher unsigned int natids, unsigned int nstids, 1189f7917c00SJeff Kirsher unsigned int atid_base, unsigned int stid_base) 1190f7917c00SJeff Kirsher { 1191f7917c00SJeff Kirsher unsigned long size = ntids * sizeof(*t->tid_tab) + 1192f7917c00SJeff Kirsher natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab); 1193f7917c00SJeff Kirsher 1194f7917c00SJeff Kirsher t->tid_tab = cxgb_alloc_mem(size); 1195f7917c00SJeff Kirsher if (!t->tid_tab) 1196f7917c00SJeff Kirsher return -ENOMEM; 1197f7917c00SJeff Kirsher 1198f7917c00SJeff Kirsher t->stid_tab = (union listen_entry *)&t->tid_tab[ntids]; 1199f7917c00SJeff Kirsher t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids]; 1200f7917c00SJeff Kirsher t->ntids = ntids; 1201f7917c00SJeff Kirsher t->nstids = nstids; 1202f7917c00SJeff Kirsher t->stid_base = stid_base; 1203f7917c00SJeff Kirsher t->sfree = NULL; 1204f7917c00SJeff Kirsher t->natids = natids; 
1205f7917c00SJeff Kirsher t->atid_base = atid_base; 1206f7917c00SJeff Kirsher t->afree = NULL; 1207f7917c00SJeff Kirsher t->stids_in_use = t->atids_in_use = 0; 1208f7917c00SJeff Kirsher atomic_set(&t->tids_in_use, 0); 1209f7917c00SJeff Kirsher spin_lock_init(&t->stid_lock); 1210f7917c00SJeff Kirsher spin_lock_init(&t->atid_lock); 1211f7917c00SJeff Kirsher 1212f7917c00SJeff Kirsher /* 1213f7917c00SJeff Kirsher * Setup the free lists for stid_tab and atid_tab. 1214f7917c00SJeff Kirsher */ 1215f7917c00SJeff Kirsher if (nstids) { 1216f7917c00SJeff Kirsher while (--nstids) 1217f7917c00SJeff Kirsher t->stid_tab[nstids - 1].next = &t->stid_tab[nstids]; 1218f7917c00SJeff Kirsher t->sfree = t->stid_tab; 1219f7917c00SJeff Kirsher } 1220f7917c00SJeff Kirsher if (natids) { 1221f7917c00SJeff Kirsher while (--natids) 1222f7917c00SJeff Kirsher t->atid_tab[natids - 1].next = &t->atid_tab[natids]; 1223f7917c00SJeff Kirsher t->afree = t->atid_tab; 1224f7917c00SJeff Kirsher } 1225f7917c00SJeff Kirsher return 0; 1226f7917c00SJeff Kirsher } 1227f7917c00SJeff Kirsher 1228f7917c00SJeff Kirsher static void free_tid_maps(struct tid_info *t) 1229f7917c00SJeff Kirsher { 1230f7917c00SJeff Kirsher cxgb_free_mem(t->tid_tab); 1231f7917c00SJeff Kirsher } 1232f7917c00SJeff Kirsher 1233f7917c00SJeff Kirsher static inline void add_adapter(struct adapter *adap) 1234f7917c00SJeff Kirsher { 1235f7917c00SJeff Kirsher write_lock_bh(&adapter_list_lock); 1236f7917c00SJeff Kirsher list_add_tail(&adap->adapter_list, &adapter_list); 1237f7917c00SJeff Kirsher write_unlock_bh(&adapter_list_lock); 1238f7917c00SJeff Kirsher } 1239f7917c00SJeff Kirsher 1240f7917c00SJeff Kirsher static inline void remove_adapter(struct adapter *adap) 1241f7917c00SJeff Kirsher { 1242f7917c00SJeff Kirsher write_lock_bh(&adapter_list_lock); 1243f7917c00SJeff Kirsher list_del(&adap->adapter_list); 1244f7917c00SJeff Kirsher write_unlock_bh(&adapter_list_lock); 1245f7917c00SJeff Kirsher } 1246f7917c00SJeff Kirsher 1247f7917c00SJeff 
/*
 * Bring up the offload side of an adapter: query the device's capabilities,
 * publish the L2 table, build the TID tables, and register the netevent
 * notifier (once, for the first adapter).  Returns 0 on success or a
 * negative errno; on failure all partially-built state is torn down.
 */
int cxgb3_offload_activate(struct adapter *adapter)
{
	struct t3cdev *dev = &adapter->tdev;
	int natids, err;
	struct t3c_data *t;
	struct tid_range stid_range, tid_range;
	struct mtutab mtutab;
	unsigned int l2t_capacity;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	err = -EOPNOTSUPP;
	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
		goto out_free;

	err = -ENOMEM;
	/* l2opt is read under RCU elsewhere; no readers exist yet, so a
	 * plain RCU_INIT_POINTER publication is sufficient here */
	RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity));
	if (!L2DATA(dev))
		goto out_free;

	/* use at most half the TID space for active opens */
	natids = min(tid_range.num / 2, MAX_ATIDS);
	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
			    stid_range.num, ATID_BASE, stid_range.base);
	if (err)
		goto out_free_l2t;

	t->mtus = mtutab.mtus;
	t->nmtus = mtutab.size;

	INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
	spin_lock_init(&t->tid_release_lock);
	INIT_LIST_HEAD(&t->list_node);
	t->dev = dev;

	T3C_DATA(dev) = t;
	dev->recv = process_rx;
	dev->neigh_update = t3_l2t_update;

	/* Register netevent handler once */
	if (list_empty(&adapter_list))
		register_netevent_notifier(&nb);

	/* pre-allocated skb so TID release can always make progress */
	t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
	t->release_list_incomplete = 0;

	add_adapter(adapter);
	return 0;

out_free_l2t:
	t3_free_l2t(L2DATA(dev));
	RCU_INIT_POINTER(dev->l2opt, NULL);
out_free:
	kfree(t);
	return err;
}
/* RCU callback: free the L2 table once all readers have drained. */
static void clean_l2_data(struct rcu_head *head)
{
	struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
	t3_free_l2t(d);
}


/*
 * Tear down the offload state built by cxgb3_offload_activate():
 * remove the adapter from the global list, drop the netevent notifier
 * when the last adapter goes away, free the TID maps, and retire the
 * L2 table via call_rcu() so in-flight RCU readers stay safe.
 */
void cxgb3_offload_deactivate(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;
	struct t3c_data *t = T3C_DATA(tdev);
	struct l2t_data *d;

	remove_adapter(adapter);
	if (list_empty(&adapter_list))
		unregister_netevent_notifier(&nb);

	free_tid_maps(&t->tid_maps);
	T3C_DATA(tdev) = NULL;
	/* Snapshot the L2 table pointer under RCU, then unpublish it. */
	rcu_read_lock();
	d = L2DATA(tdev);
	rcu_read_unlock();
	RCU_INIT_POINTER(tdev->l2opt, NULL);
	/* Deferred free: existing readers may still hold references to d. */
	call_rcu(&d->rcu_head, clean_l2_data);
	if (t->nofail_skb)
		kfree_skb(t->nofail_skb);
	kfree(t);
}

/*
 * Give the offload device a unique "ofld_devN" name and append it to
 * the global device list; cxgb3_db_lock serialises against concurrent
 * register/unregister.
 */
static inline void register_tdev(struct t3cdev *tdev)
{
	static int unit;	/* monotonically increasing device index */

	mutex_lock(&cxgb3_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

/* Remove the offload device from the global list (see register_tdev). */
static inline void unregister_tdev(struct t3cdev *tdev)
{
	mutex_lock(&cxgb3_db_lock);
	list_del(&tdev->ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

/*
 * Map the adapter's hardware revision to the coarse T3A/T3B/T3C device
 * type exposed to offload clients.  Unknown revisions map to 0.
 */
static inline int adap2type(struct adapter *adapter)
{
	int type = 0;

	switch (adapter->params.rev) {
	case T3_REV_A:
		type = T3A;
		break;
	case T3_REV_B:
	case T3_REV_B2:
		type = T3B;
		break;
	case T3_REV_C:
		type = T3C;
		break;
	}
	return type;
}

/*
 * Device-probe hook: initialise the t3cdev embedded in the adapter with
 * default (dummy) ops, the TX/ctl entry points and the device type,
 * then make it visible via register_tdev().
 */
void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	INIT_LIST_HEAD(&tdev->ofld_dev_list);

	cxgb3_set_dummy_ops(tdev);
	tdev->send = t3_offload_tx;
	tdev->ctl = cxgb_offload_ctl;
	tdev->type = adap2type(adapter);

	register_tdev(tdev);
}

/*
 * Device-remove hook: clear the callbacks installed at activate time
 * and take the device off the global list.
 */
void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	tdev->recv = NULL;
	tdev->neigh_update = NULL;

	unregister_tdev(tdev);
}

/*
 * Module-init: populate the CPL message dispatch table.  Every opcode
 * defaults to do_bad_cpl; known opcodes are then bound to their
 * handlers (TID-carrying messages go through do_hwtid_rpl).
 */
void __init cxgb3_offload_init(void)
{
	int i;

	for (i = 0; i < NUM_CPL_CMDS; ++i)
		cpl_handlers[i] = do_bad_cpl;

	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
	t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
}