1f7917c00SJeff Kirsher /* 2f7917c00SJeff Kirsher * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved. 3f7917c00SJeff Kirsher * 4f7917c00SJeff Kirsher * This software is available to you under a choice of one of two 5f7917c00SJeff Kirsher * licenses. You may choose to be licensed under the terms of the GNU 6f7917c00SJeff Kirsher * General Public License (GPL) Version 2, available from the file 7f7917c00SJeff Kirsher * COPYING in the main directory of this source tree, or the 8f7917c00SJeff Kirsher * OpenIB.org BSD license below: 9f7917c00SJeff Kirsher * 10f7917c00SJeff Kirsher * Redistribution and use in source and binary forms, with or 11f7917c00SJeff Kirsher * without modification, are permitted provided that the following 12f7917c00SJeff Kirsher * conditions are met: 13f7917c00SJeff Kirsher * 14f7917c00SJeff Kirsher * - Redistributions of source code must retain the above 15f7917c00SJeff Kirsher * copyright notice, this list of conditions and the following 16f7917c00SJeff Kirsher * disclaimer. 17f7917c00SJeff Kirsher * 18f7917c00SJeff Kirsher * - Redistributions in binary form must reproduce the above 19f7917c00SJeff Kirsher * copyright notice, this list of conditions and the following 20f7917c00SJeff Kirsher * disclaimer in the documentation and/or other materials 21f7917c00SJeff Kirsher * provided with the distribution. 22f7917c00SJeff Kirsher * 23f7917c00SJeff Kirsher * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24f7917c00SJeff Kirsher * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25f7917c00SJeff Kirsher * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26f7917c00SJeff Kirsher * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27f7917c00SJeff Kirsher * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28f7917c00SJeff Kirsher * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29f7917c00SJeff Kirsher * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30f7917c00SJeff Kirsher * SOFTWARE. 31f7917c00SJeff Kirsher */ 32f7917c00SJeff Kirsher 33f7917c00SJeff Kirsher #include <linux/list.h> 34f7917c00SJeff Kirsher #include <linux/slab.h> 35f7917c00SJeff Kirsher #include <net/neighbour.h> 36f7917c00SJeff Kirsher #include <linux/notifier.h> 37f7917c00SJeff Kirsher #include <linux/atomic.h> 38f7917c00SJeff Kirsher #include <linux/proc_fs.h> 39f7917c00SJeff Kirsher #include <linux/if_vlan.h> 40f7917c00SJeff Kirsher #include <net/netevent.h> 41f7917c00SJeff Kirsher #include <linux/highmem.h> 42f7917c00SJeff Kirsher #include <linux/vmalloc.h> 43ee40fa06SPaul Gortmaker #include <linux/export.h> 44f7917c00SJeff Kirsher 45f7917c00SJeff Kirsher #include "common.h" 46f7917c00SJeff Kirsher #include "regs.h" 47f7917c00SJeff Kirsher #include "cxgb3_ioctl.h" 48f7917c00SJeff Kirsher #include "cxgb3_ctl_defs.h" 49f7917c00SJeff Kirsher #include "cxgb3_defs.h" 50f7917c00SJeff Kirsher #include "l2t.h" 51f7917c00SJeff Kirsher #include "firmware_exports.h" 52f7917c00SJeff Kirsher #include "cxgb3_offload.h" 53f7917c00SJeff Kirsher 54f7917c00SJeff Kirsher static LIST_HEAD(client_list); 55f7917c00SJeff Kirsher static LIST_HEAD(ofld_dev_list); 56f7917c00SJeff Kirsher static DEFINE_MUTEX(cxgb3_db_lock); 57f7917c00SJeff Kirsher 58f7917c00SJeff Kirsher static DEFINE_RWLOCK(adapter_list_lock); 59f7917c00SJeff Kirsher static LIST_HEAD(adapter_list); 60f7917c00SJeff Kirsher 61f7917c00SJeff Kirsher static const unsigned int MAX_ATIDS = 64 * 1024; 62f7917c00SJeff Kirsher static const unsigned int ATID_BASE = 0x10000; 63f7917c00SJeff Kirsher 64f7917c00SJeff Kirsher static void cxgb_neigh_update(struct neighbour *neigh); 
651d248b1cSDavid S. Miller static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh, 661d248b1cSDavid S. Miller struct dst_entry *new, struct neighbour *new_neigh); 67f7917c00SJeff Kirsher 68f7917c00SJeff Kirsher static inline int offload_activated(struct t3cdev *tdev) 69f7917c00SJeff Kirsher { 70f7917c00SJeff Kirsher const struct adapter *adapter = tdev2adap(tdev); 71f7917c00SJeff Kirsher 72f7917c00SJeff Kirsher return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map); 73f7917c00SJeff Kirsher } 74f7917c00SJeff Kirsher 75f7917c00SJeff Kirsher /** 76f7917c00SJeff Kirsher * cxgb3_register_client - register an offload client 77f7917c00SJeff Kirsher * @client: the client 78f7917c00SJeff Kirsher * 79f7917c00SJeff Kirsher * Add the client to the client list, 80f7917c00SJeff Kirsher * and call backs the client for each activated offload device 81f7917c00SJeff Kirsher */ 82f7917c00SJeff Kirsher void cxgb3_register_client(struct cxgb3_client *client) 83f7917c00SJeff Kirsher { 84f7917c00SJeff Kirsher struct t3cdev *tdev; 85f7917c00SJeff Kirsher 86f7917c00SJeff Kirsher mutex_lock(&cxgb3_db_lock); 87f7917c00SJeff Kirsher list_add_tail(&client->client_list, &client_list); 88f7917c00SJeff Kirsher 89f7917c00SJeff Kirsher if (client->add) { 90f7917c00SJeff Kirsher list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) { 91f7917c00SJeff Kirsher if (offload_activated(tdev)) 92f7917c00SJeff Kirsher client->add(tdev); 93f7917c00SJeff Kirsher } 94f7917c00SJeff Kirsher } 95f7917c00SJeff Kirsher mutex_unlock(&cxgb3_db_lock); 96f7917c00SJeff Kirsher } 97f7917c00SJeff Kirsher 98f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb3_register_client); 99f7917c00SJeff Kirsher 100f7917c00SJeff Kirsher /** 101f7917c00SJeff Kirsher * cxgb3_unregister_client - unregister an offload client 102f7917c00SJeff Kirsher * @client: the client 103f7917c00SJeff Kirsher * 104f7917c00SJeff Kirsher * Remove the client to the client list, 105f7917c00SJeff Kirsher * and call backs the client for 
each activated offload device. 106f7917c00SJeff Kirsher */ 107f7917c00SJeff Kirsher void cxgb3_unregister_client(struct cxgb3_client *client) 108f7917c00SJeff Kirsher { 109f7917c00SJeff Kirsher struct t3cdev *tdev; 110f7917c00SJeff Kirsher 111f7917c00SJeff Kirsher mutex_lock(&cxgb3_db_lock); 112f7917c00SJeff Kirsher list_del(&client->client_list); 113f7917c00SJeff Kirsher 114f7917c00SJeff Kirsher if (client->remove) { 115f7917c00SJeff Kirsher list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) { 116f7917c00SJeff Kirsher if (offload_activated(tdev)) 117f7917c00SJeff Kirsher client->remove(tdev); 118f7917c00SJeff Kirsher } 119f7917c00SJeff Kirsher } 120f7917c00SJeff Kirsher mutex_unlock(&cxgb3_db_lock); 121f7917c00SJeff Kirsher } 122f7917c00SJeff Kirsher 123f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb3_unregister_client); 124f7917c00SJeff Kirsher 125f7917c00SJeff Kirsher /** 126f7917c00SJeff Kirsher * cxgb3_add_clients - activate registered clients for an offload device 127f7917c00SJeff Kirsher * @tdev: the offload device 128f7917c00SJeff Kirsher * 129f7917c00SJeff Kirsher * Call backs all registered clients once a offload device is activated 130f7917c00SJeff Kirsher */ 131f7917c00SJeff Kirsher void cxgb3_add_clients(struct t3cdev *tdev) 132f7917c00SJeff Kirsher { 133f7917c00SJeff Kirsher struct cxgb3_client *client; 134f7917c00SJeff Kirsher 135f7917c00SJeff Kirsher mutex_lock(&cxgb3_db_lock); 136f7917c00SJeff Kirsher list_for_each_entry(client, &client_list, client_list) { 137f7917c00SJeff Kirsher if (client->add) 138f7917c00SJeff Kirsher client->add(tdev); 139f7917c00SJeff Kirsher } 140f7917c00SJeff Kirsher mutex_unlock(&cxgb3_db_lock); 141f7917c00SJeff Kirsher } 142f7917c00SJeff Kirsher 143f7917c00SJeff Kirsher /** 144f7917c00SJeff Kirsher * cxgb3_remove_clients - deactivates registered clients 145f7917c00SJeff Kirsher * for an offload device 146f7917c00SJeff Kirsher * @tdev: the offload device 147f7917c00SJeff Kirsher * 148f7917c00SJeff Kirsher * Call backs all 
registered clients once a offload device is deactivated 149f7917c00SJeff Kirsher */ 150f7917c00SJeff Kirsher void cxgb3_remove_clients(struct t3cdev *tdev) 151f7917c00SJeff Kirsher { 152f7917c00SJeff Kirsher struct cxgb3_client *client; 153f7917c00SJeff Kirsher 154f7917c00SJeff Kirsher mutex_lock(&cxgb3_db_lock); 155f7917c00SJeff Kirsher list_for_each_entry(client, &client_list, client_list) { 156f7917c00SJeff Kirsher if (client->remove) 157f7917c00SJeff Kirsher client->remove(tdev); 158f7917c00SJeff Kirsher } 159f7917c00SJeff Kirsher mutex_unlock(&cxgb3_db_lock); 160f7917c00SJeff Kirsher } 161f7917c00SJeff Kirsher 162f7917c00SJeff Kirsher void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port) 163f7917c00SJeff Kirsher { 164f7917c00SJeff Kirsher struct cxgb3_client *client; 165f7917c00SJeff Kirsher 166f7917c00SJeff Kirsher mutex_lock(&cxgb3_db_lock); 167f7917c00SJeff Kirsher list_for_each_entry(client, &client_list, client_list) { 168f7917c00SJeff Kirsher if (client->event_handler) 169f7917c00SJeff Kirsher client->event_handler(tdev, event, port); 170f7917c00SJeff Kirsher } 171f7917c00SJeff Kirsher mutex_unlock(&cxgb3_db_lock); 172f7917c00SJeff Kirsher } 173f7917c00SJeff Kirsher 174f7917c00SJeff Kirsher static struct net_device *get_iff_from_mac(struct adapter *adapter, 175f7917c00SJeff Kirsher const unsigned char *mac, 176f7917c00SJeff Kirsher unsigned int vlan) 177f7917c00SJeff Kirsher { 178f7917c00SJeff Kirsher int i; 179f7917c00SJeff Kirsher 180f7917c00SJeff Kirsher for_each_port(adapter, i) { 181f7917c00SJeff Kirsher struct net_device *dev = adapter->port[i]; 182f7917c00SJeff Kirsher 183f7917c00SJeff Kirsher if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) { 184f7917c00SJeff Kirsher if (vlan && vlan != VLAN_VID_MASK) { 185f7917c00SJeff Kirsher rcu_read_lock(); 186f7917c00SJeff Kirsher dev = __vlan_find_dev_deep(dev, vlan); 187f7917c00SJeff Kirsher rcu_read_unlock(); 188f7917c00SJeff Kirsher } else if (netif_is_bond_slave(dev)) { 189f7917c00SJeff Kirsher 
while (dev->master) 190f7917c00SJeff Kirsher dev = dev->master; 191f7917c00SJeff Kirsher } 192f7917c00SJeff Kirsher return dev; 193f7917c00SJeff Kirsher } 194f7917c00SJeff Kirsher } 195f7917c00SJeff Kirsher return NULL; 196f7917c00SJeff Kirsher } 197f7917c00SJeff Kirsher 198f7917c00SJeff Kirsher static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req, 199f7917c00SJeff Kirsher void *data) 200f7917c00SJeff Kirsher { 201f7917c00SJeff Kirsher int i; 202f7917c00SJeff Kirsher int ret = 0; 203f7917c00SJeff Kirsher unsigned int val = 0; 204f7917c00SJeff Kirsher struct ulp_iscsi_info *uiip = data; 205f7917c00SJeff Kirsher 206f7917c00SJeff Kirsher switch (req) { 207f7917c00SJeff Kirsher case ULP_ISCSI_GET_PARAMS: 208f7917c00SJeff Kirsher uiip->pdev = adapter->pdev; 209f7917c00SJeff Kirsher uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT); 210f7917c00SJeff Kirsher uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT); 211f7917c00SJeff Kirsher uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK); 212f7917c00SJeff Kirsher 213f7917c00SJeff Kirsher val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ); 214f7917c00SJeff Kirsher for (i = 0; i < 4; i++, val >>= 8) 215f7917c00SJeff Kirsher uiip->pgsz_factor[i] = val & 0xFF; 216f7917c00SJeff Kirsher 217f7917c00SJeff Kirsher val = t3_read_reg(adapter, A_TP_PARA_REG7); 218f7917c00SJeff Kirsher uiip->max_txsz = 219f7917c00SJeff Kirsher uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0)&M_PMMAXXFERLEN0, 220f7917c00SJeff Kirsher (val >> S_PMMAXXFERLEN1)&M_PMMAXXFERLEN1); 221f7917c00SJeff Kirsher /* 222f7917c00SJeff Kirsher * On tx, the iscsi pdu has to be <= tx page size and has to 223f7917c00SJeff Kirsher * fit into the Tx PM FIFO. 
224f7917c00SJeff Kirsher */ 225f7917c00SJeff Kirsher val = min(adapter->params.tp.tx_pg_size, 226f7917c00SJeff Kirsher t3_read_reg(adapter, A_PM1_TX_CFG) >> 17); 227f7917c00SJeff Kirsher uiip->max_txsz = min(val, uiip->max_txsz); 228f7917c00SJeff Kirsher 229f7917c00SJeff Kirsher /* set MaxRxData to 16224 */ 230f7917c00SJeff Kirsher val = t3_read_reg(adapter, A_TP_PARA_REG2); 231f7917c00SJeff Kirsher if ((val >> S_MAXRXDATA) != 0x3f60) { 232f7917c00SJeff Kirsher val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE); 233f7917c00SJeff Kirsher val |= V_MAXRXDATA(0x3f60); 234f7917c00SJeff Kirsher printk(KERN_INFO 235f7917c00SJeff Kirsher "%s, iscsi set MaxRxData to 16224 (0x%x).\n", 236f7917c00SJeff Kirsher adapter->name, val); 237f7917c00SJeff Kirsher t3_write_reg(adapter, A_TP_PARA_REG2, val); 238f7917c00SJeff Kirsher } 239f7917c00SJeff Kirsher 240f7917c00SJeff Kirsher /* 241f7917c00SJeff Kirsher * on rx, the iscsi pdu has to be < rx page size and the 242f7917c00SJeff Kirsher * the max rx data length programmed in TP 243f7917c00SJeff Kirsher */ 244f7917c00SJeff Kirsher val = min(adapter->params.tp.rx_pg_size, 245f7917c00SJeff Kirsher ((t3_read_reg(adapter, A_TP_PARA_REG2)) >> 246f7917c00SJeff Kirsher S_MAXRXDATA) & M_MAXRXDATA); 247f7917c00SJeff Kirsher uiip->max_rxsz = min(val, uiip->max_rxsz); 248f7917c00SJeff Kirsher break; 249f7917c00SJeff Kirsher case ULP_ISCSI_SET_PARAMS: 250f7917c00SJeff Kirsher t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask); 251f7917c00SJeff Kirsher /* program the ddp page sizes */ 252f7917c00SJeff Kirsher for (i = 0; i < 4; i++) 253f7917c00SJeff Kirsher val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i); 254f7917c00SJeff Kirsher if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) { 255f7917c00SJeff Kirsher printk(KERN_INFO 256f7917c00SJeff Kirsher "%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u.\n", 257f7917c00SJeff Kirsher adapter->name, val, uiip->pgsz_factor[0], 258f7917c00SJeff Kirsher uiip->pgsz_factor[1], uiip->pgsz_factor[2], 
259f7917c00SJeff Kirsher uiip->pgsz_factor[3]); 260f7917c00SJeff Kirsher t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val); 261f7917c00SJeff Kirsher } 262f7917c00SJeff Kirsher break; 263f7917c00SJeff Kirsher default: 264f7917c00SJeff Kirsher ret = -EOPNOTSUPP; 265f7917c00SJeff Kirsher } 266f7917c00SJeff Kirsher return ret; 267f7917c00SJeff Kirsher } 268f7917c00SJeff Kirsher 269f7917c00SJeff Kirsher /* Response queue used for RDMA events. */ 270f7917c00SJeff Kirsher #define ASYNC_NOTIF_RSPQ 0 271f7917c00SJeff Kirsher 272f7917c00SJeff Kirsher static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data) 273f7917c00SJeff Kirsher { 274f7917c00SJeff Kirsher int ret = 0; 275f7917c00SJeff Kirsher 276f7917c00SJeff Kirsher switch (req) { 277f7917c00SJeff Kirsher case RDMA_GET_PARAMS: { 278f7917c00SJeff Kirsher struct rdma_info *rdma = data; 279f7917c00SJeff Kirsher struct pci_dev *pdev = adapter->pdev; 280f7917c00SJeff Kirsher 281f7917c00SJeff Kirsher rdma->udbell_physbase = pci_resource_start(pdev, 2); 282f7917c00SJeff Kirsher rdma->udbell_len = pci_resource_len(pdev, 2); 283f7917c00SJeff Kirsher rdma->tpt_base = 284f7917c00SJeff Kirsher t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT); 285f7917c00SJeff Kirsher rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT); 286f7917c00SJeff Kirsher rdma->pbl_base = 287f7917c00SJeff Kirsher t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT); 288f7917c00SJeff Kirsher rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT); 289f7917c00SJeff Kirsher rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT); 290f7917c00SJeff Kirsher rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT); 291f7917c00SJeff Kirsher rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL; 292f7917c00SJeff Kirsher rdma->pdev = pdev; 293f7917c00SJeff Kirsher break; 294f7917c00SJeff Kirsher } 295f7917c00SJeff Kirsher case RDMA_CQ_OP:{ 296f7917c00SJeff Kirsher unsigned long flags; 297f7917c00SJeff Kirsher struct rdma_cq_op *rdma = data; 298f7917c00SJeff Kirsher 
299f7917c00SJeff Kirsher /* may be called in any context */ 300f7917c00SJeff Kirsher spin_lock_irqsave(&adapter->sge.reg_lock, flags); 301f7917c00SJeff Kirsher ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op, 302f7917c00SJeff Kirsher rdma->credits); 303f7917c00SJeff Kirsher spin_unlock_irqrestore(&adapter->sge.reg_lock, flags); 304f7917c00SJeff Kirsher break; 305f7917c00SJeff Kirsher } 306f7917c00SJeff Kirsher case RDMA_GET_MEM:{ 307f7917c00SJeff Kirsher struct ch_mem_range *t = data; 308f7917c00SJeff Kirsher struct mc7 *mem; 309f7917c00SJeff Kirsher 310f7917c00SJeff Kirsher if ((t->addr & 7) || (t->len & 7)) 311f7917c00SJeff Kirsher return -EINVAL; 312f7917c00SJeff Kirsher if (t->mem_id == MEM_CM) 313f7917c00SJeff Kirsher mem = &adapter->cm; 314f7917c00SJeff Kirsher else if (t->mem_id == MEM_PMRX) 315f7917c00SJeff Kirsher mem = &adapter->pmrx; 316f7917c00SJeff Kirsher else if (t->mem_id == MEM_PMTX) 317f7917c00SJeff Kirsher mem = &adapter->pmtx; 318f7917c00SJeff Kirsher else 319f7917c00SJeff Kirsher return -EINVAL; 320f7917c00SJeff Kirsher 321f7917c00SJeff Kirsher ret = 322f7917c00SJeff Kirsher t3_mc7_bd_read(mem, t->addr / 8, t->len / 8, 323f7917c00SJeff Kirsher (u64 *) t->buf); 324f7917c00SJeff Kirsher if (ret) 325f7917c00SJeff Kirsher return ret; 326f7917c00SJeff Kirsher break; 327f7917c00SJeff Kirsher } 328f7917c00SJeff Kirsher case RDMA_CQ_SETUP:{ 329f7917c00SJeff Kirsher struct rdma_cq_setup *rdma = data; 330f7917c00SJeff Kirsher 331f7917c00SJeff Kirsher spin_lock_irq(&adapter->sge.reg_lock); 332f7917c00SJeff Kirsher ret = 333f7917c00SJeff Kirsher t3_sge_init_cqcntxt(adapter, rdma->id, 334f7917c00SJeff Kirsher rdma->base_addr, rdma->size, 335f7917c00SJeff Kirsher ASYNC_NOTIF_RSPQ, 336f7917c00SJeff Kirsher rdma->ovfl_mode, rdma->credits, 337f7917c00SJeff Kirsher rdma->credit_thres); 338f7917c00SJeff Kirsher spin_unlock_irq(&adapter->sge.reg_lock); 339f7917c00SJeff Kirsher break; 340f7917c00SJeff Kirsher } 341f7917c00SJeff Kirsher case RDMA_CQ_DISABLE: 
342f7917c00SJeff Kirsher spin_lock_irq(&adapter->sge.reg_lock); 343f7917c00SJeff Kirsher ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data); 344f7917c00SJeff Kirsher spin_unlock_irq(&adapter->sge.reg_lock); 345f7917c00SJeff Kirsher break; 346f7917c00SJeff Kirsher case RDMA_CTRL_QP_SETUP:{ 347f7917c00SJeff Kirsher struct rdma_ctrlqp_setup *rdma = data; 348f7917c00SJeff Kirsher 349f7917c00SJeff Kirsher spin_lock_irq(&adapter->sge.reg_lock); 350f7917c00SJeff Kirsher ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0, 351f7917c00SJeff Kirsher SGE_CNTXT_RDMA, 352f7917c00SJeff Kirsher ASYNC_NOTIF_RSPQ, 353f7917c00SJeff Kirsher rdma->base_addr, rdma->size, 354f7917c00SJeff Kirsher FW_RI_TID_START, 1, 0); 355f7917c00SJeff Kirsher spin_unlock_irq(&adapter->sge.reg_lock); 356f7917c00SJeff Kirsher break; 357f7917c00SJeff Kirsher } 358f7917c00SJeff Kirsher case RDMA_GET_MIB: { 359f7917c00SJeff Kirsher spin_lock(&adapter->stats_lock); 360f7917c00SJeff Kirsher t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data); 361f7917c00SJeff Kirsher spin_unlock(&adapter->stats_lock); 362f7917c00SJeff Kirsher break; 363f7917c00SJeff Kirsher } 364f7917c00SJeff Kirsher default: 365f7917c00SJeff Kirsher ret = -EOPNOTSUPP; 366f7917c00SJeff Kirsher } 367f7917c00SJeff Kirsher return ret; 368f7917c00SJeff Kirsher } 369f7917c00SJeff Kirsher 370f7917c00SJeff Kirsher static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data) 371f7917c00SJeff Kirsher { 372f7917c00SJeff Kirsher struct adapter *adapter = tdev2adap(tdev); 373f7917c00SJeff Kirsher struct tid_range *tid; 374f7917c00SJeff Kirsher struct mtutab *mtup; 375f7917c00SJeff Kirsher struct iff_mac *iffmacp; 376f7917c00SJeff Kirsher struct ddp_params *ddpp; 377f7917c00SJeff Kirsher struct adap_ports *ports; 378f7917c00SJeff Kirsher struct ofld_page_info *rx_page_info; 379f7917c00SJeff Kirsher struct tp_params *tp = &adapter->params.tp; 380f7917c00SJeff Kirsher int i; 381f7917c00SJeff Kirsher 382f7917c00SJeff 
Kirsher switch (req) { 383f7917c00SJeff Kirsher case GET_MAX_OUTSTANDING_WR: 384f7917c00SJeff Kirsher *(unsigned int *)data = FW_WR_NUM; 385f7917c00SJeff Kirsher break; 386f7917c00SJeff Kirsher case GET_WR_LEN: 387f7917c00SJeff Kirsher *(unsigned int *)data = WR_FLITS; 388f7917c00SJeff Kirsher break; 389f7917c00SJeff Kirsher case GET_TX_MAX_CHUNK: 390f7917c00SJeff Kirsher *(unsigned int *)data = 1 << 20; /* 1MB */ 391f7917c00SJeff Kirsher break; 392f7917c00SJeff Kirsher case GET_TID_RANGE: 393f7917c00SJeff Kirsher tid = data; 394f7917c00SJeff Kirsher tid->num = t3_mc5_size(&adapter->mc5) - 395f7917c00SJeff Kirsher adapter->params.mc5.nroutes - 396f7917c00SJeff Kirsher adapter->params.mc5.nfilters - adapter->params.mc5.nservers; 397f7917c00SJeff Kirsher tid->base = 0; 398f7917c00SJeff Kirsher break; 399f7917c00SJeff Kirsher case GET_STID_RANGE: 400f7917c00SJeff Kirsher tid = data; 401f7917c00SJeff Kirsher tid->num = adapter->params.mc5.nservers; 402f7917c00SJeff Kirsher tid->base = t3_mc5_size(&adapter->mc5) - tid->num - 403f7917c00SJeff Kirsher adapter->params.mc5.nfilters - adapter->params.mc5.nroutes; 404f7917c00SJeff Kirsher break; 405f7917c00SJeff Kirsher case GET_L2T_CAPACITY: 406f7917c00SJeff Kirsher *(unsigned int *)data = 2048; 407f7917c00SJeff Kirsher break; 408f7917c00SJeff Kirsher case GET_MTUS: 409f7917c00SJeff Kirsher mtup = data; 410f7917c00SJeff Kirsher mtup->size = NMTUS; 411f7917c00SJeff Kirsher mtup->mtus = adapter->params.mtus; 412f7917c00SJeff Kirsher break; 413f7917c00SJeff Kirsher case GET_IFF_FROM_MAC: 414f7917c00SJeff Kirsher iffmacp = data; 415f7917c00SJeff Kirsher iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr, 416f7917c00SJeff Kirsher iffmacp->vlan_tag & 417f7917c00SJeff Kirsher VLAN_VID_MASK); 418f7917c00SJeff Kirsher break; 419f7917c00SJeff Kirsher case GET_DDP_PARAMS: 420f7917c00SJeff Kirsher ddpp = data; 421f7917c00SJeff Kirsher ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT); 422f7917c00SJeff Kirsher ddpp->ulimit 
= t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT); 423f7917c00SJeff Kirsher ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK); 424f7917c00SJeff Kirsher break; 425f7917c00SJeff Kirsher case GET_PORTS: 426f7917c00SJeff Kirsher ports = data; 427f7917c00SJeff Kirsher ports->nports = adapter->params.nports; 428f7917c00SJeff Kirsher for_each_port(adapter, i) 429f7917c00SJeff Kirsher ports->lldevs[i] = adapter->port[i]; 430f7917c00SJeff Kirsher break; 431f7917c00SJeff Kirsher case ULP_ISCSI_GET_PARAMS: 432f7917c00SJeff Kirsher case ULP_ISCSI_SET_PARAMS: 433f7917c00SJeff Kirsher if (!offload_running(adapter)) 434f7917c00SJeff Kirsher return -EAGAIN; 435f7917c00SJeff Kirsher return cxgb_ulp_iscsi_ctl(adapter, req, data); 436f7917c00SJeff Kirsher case RDMA_GET_PARAMS: 437f7917c00SJeff Kirsher case RDMA_CQ_OP: 438f7917c00SJeff Kirsher case RDMA_CQ_SETUP: 439f7917c00SJeff Kirsher case RDMA_CQ_DISABLE: 440f7917c00SJeff Kirsher case RDMA_CTRL_QP_SETUP: 441f7917c00SJeff Kirsher case RDMA_GET_MEM: 442f7917c00SJeff Kirsher case RDMA_GET_MIB: 443f7917c00SJeff Kirsher if (!offload_running(adapter)) 444f7917c00SJeff Kirsher return -EAGAIN; 445f7917c00SJeff Kirsher return cxgb_rdma_ctl(adapter, req, data); 446f7917c00SJeff Kirsher case GET_RX_PAGE_INFO: 447f7917c00SJeff Kirsher rx_page_info = data; 448f7917c00SJeff Kirsher rx_page_info->page_size = tp->rx_pg_size; 449f7917c00SJeff Kirsher rx_page_info->num = tp->rx_num_pgs; 450f7917c00SJeff Kirsher break; 451f7917c00SJeff Kirsher case GET_ISCSI_IPV4ADDR: { 452f7917c00SJeff Kirsher struct iscsi_ipv4addr *p = data; 453f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(p->dev); 454f7917c00SJeff Kirsher p->ipv4addr = pi->iscsi_ipv4addr; 455f7917c00SJeff Kirsher break; 456f7917c00SJeff Kirsher } 457f7917c00SJeff Kirsher case GET_EMBEDDED_INFO: { 458f7917c00SJeff Kirsher struct ch_embedded_info *e = data; 459f7917c00SJeff Kirsher 460f7917c00SJeff Kirsher spin_lock(&adapter->stats_lock); 461f7917c00SJeff Kirsher 
t3_get_fw_version(adapter, &e->fw_vers); 462f7917c00SJeff Kirsher t3_get_tp_version(adapter, &e->tp_vers); 463f7917c00SJeff Kirsher spin_unlock(&adapter->stats_lock); 464f7917c00SJeff Kirsher break; 465f7917c00SJeff Kirsher } 466f7917c00SJeff Kirsher default: 467f7917c00SJeff Kirsher return -EOPNOTSUPP; 468f7917c00SJeff Kirsher } 469f7917c00SJeff Kirsher return 0; 470f7917c00SJeff Kirsher } 471f7917c00SJeff Kirsher 472f7917c00SJeff Kirsher /* 473f7917c00SJeff Kirsher * Dummy handler for Rx offload packets in case we get an offload packet before 474f7917c00SJeff Kirsher * proper processing is setup. This complains and drops the packet as it isn't 475f7917c00SJeff Kirsher * normal to get offload packets at this stage. 476f7917c00SJeff Kirsher */ 477f7917c00SJeff Kirsher static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs, 478f7917c00SJeff Kirsher int n) 479f7917c00SJeff Kirsher { 480f7917c00SJeff Kirsher while (n--) 481f7917c00SJeff Kirsher dev_kfree_skb_any(skbs[n]); 482f7917c00SJeff Kirsher return 0; 483f7917c00SJeff Kirsher } 484f7917c00SJeff Kirsher 485f7917c00SJeff Kirsher static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh) 486f7917c00SJeff Kirsher { 487f7917c00SJeff Kirsher } 488f7917c00SJeff Kirsher 489f7917c00SJeff Kirsher void cxgb3_set_dummy_ops(struct t3cdev *dev) 490f7917c00SJeff Kirsher { 491f7917c00SJeff Kirsher dev->recv = rx_offload_blackhole; 492f7917c00SJeff Kirsher dev->neigh_update = dummy_neigh_update; 493f7917c00SJeff Kirsher } 494f7917c00SJeff Kirsher 495f7917c00SJeff Kirsher /* 496f7917c00SJeff Kirsher * Free an active-open TID. 
497f7917c00SJeff Kirsher */ 498f7917c00SJeff Kirsher void *cxgb3_free_atid(struct t3cdev *tdev, int atid) 499f7917c00SJeff Kirsher { 500f7917c00SJeff Kirsher struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; 501f7917c00SJeff Kirsher union active_open_entry *p = atid2entry(t, atid); 502f7917c00SJeff Kirsher void *ctx = p->t3c_tid.ctx; 503f7917c00SJeff Kirsher 504f7917c00SJeff Kirsher spin_lock_bh(&t->atid_lock); 505f7917c00SJeff Kirsher p->next = t->afree; 506f7917c00SJeff Kirsher t->afree = p; 507f7917c00SJeff Kirsher t->atids_in_use--; 508f7917c00SJeff Kirsher spin_unlock_bh(&t->atid_lock); 509f7917c00SJeff Kirsher 510f7917c00SJeff Kirsher return ctx; 511f7917c00SJeff Kirsher } 512f7917c00SJeff Kirsher 513f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb3_free_atid); 514f7917c00SJeff Kirsher 515f7917c00SJeff Kirsher /* 516f7917c00SJeff Kirsher * Free a server TID and return it to the free pool. 517f7917c00SJeff Kirsher */ 518f7917c00SJeff Kirsher void cxgb3_free_stid(struct t3cdev *tdev, int stid) 519f7917c00SJeff Kirsher { 520f7917c00SJeff Kirsher struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; 521f7917c00SJeff Kirsher union listen_entry *p = stid2entry(t, stid); 522f7917c00SJeff Kirsher 523f7917c00SJeff Kirsher spin_lock_bh(&t->stid_lock); 524f7917c00SJeff Kirsher p->next = t->sfree; 525f7917c00SJeff Kirsher t->sfree = p; 526f7917c00SJeff Kirsher t->stids_in_use--; 527f7917c00SJeff Kirsher spin_unlock_bh(&t->stid_lock); 528f7917c00SJeff Kirsher } 529f7917c00SJeff Kirsher 530f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb3_free_stid); 531f7917c00SJeff Kirsher 532f7917c00SJeff Kirsher void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client, 533f7917c00SJeff Kirsher void *ctx, unsigned int tid) 534f7917c00SJeff Kirsher { 535f7917c00SJeff Kirsher struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; 536f7917c00SJeff Kirsher 537f7917c00SJeff Kirsher t->tid_tab[tid].client = client; 538f7917c00SJeff Kirsher t->tid_tab[tid].ctx = ctx; 539f7917c00SJeff Kirsher 
atomic_inc(&t->tids_in_use); 540f7917c00SJeff Kirsher } 541f7917c00SJeff Kirsher 542f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb3_insert_tid); 543f7917c00SJeff Kirsher 544f7917c00SJeff Kirsher /* 545f7917c00SJeff Kirsher * Populate a TID_RELEASE WR. The skb must be already propely sized. 546f7917c00SJeff Kirsher */ 547f7917c00SJeff Kirsher static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid) 548f7917c00SJeff Kirsher { 549f7917c00SJeff Kirsher struct cpl_tid_release *req; 550f7917c00SJeff Kirsher 551f7917c00SJeff Kirsher skb->priority = CPL_PRIORITY_SETUP; 552f7917c00SJeff Kirsher req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req)); 553f7917c00SJeff Kirsher req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 554f7917c00SJeff Kirsher OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); 555f7917c00SJeff Kirsher } 556f7917c00SJeff Kirsher 557f7917c00SJeff Kirsher static void t3_process_tid_release_list(struct work_struct *work) 558f7917c00SJeff Kirsher { 559f7917c00SJeff Kirsher struct t3c_data *td = container_of(work, struct t3c_data, 560f7917c00SJeff Kirsher tid_release_task); 561f7917c00SJeff Kirsher struct sk_buff *skb; 562f7917c00SJeff Kirsher struct t3cdev *tdev = td->dev; 563f7917c00SJeff Kirsher 564f7917c00SJeff Kirsher 565f7917c00SJeff Kirsher spin_lock_bh(&td->tid_release_lock); 566f7917c00SJeff Kirsher while (td->tid_release_list) { 567f7917c00SJeff Kirsher struct t3c_tid_entry *p = td->tid_release_list; 568f7917c00SJeff Kirsher 569f7917c00SJeff Kirsher td->tid_release_list = p->ctx; 570f7917c00SJeff Kirsher spin_unlock_bh(&td->tid_release_lock); 571f7917c00SJeff Kirsher 572f7917c00SJeff Kirsher skb = alloc_skb(sizeof(struct cpl_tid_release), 573f7917c00SJeff Kirsher GFP_KERNEL); 574f7917c00SJeff Kirsher if (!skb) 575f7917c00SJeff Kirsher skb = td->nofail_skb; 576f7917c00SJeff Kirsher if (!skb) { 577f7917c00SJeff Kirsher spin_lock_bh(&td->tid_release_lock); 578f7917c00SJeff Kirsher p->ctx = (void *)td->tid_release_list; 
57964699336SJoe Perches td->tid_release_list = p; 580f7917c00SJeff Kirsher break; 581f7917c00SJeff Kirsher } 582f7917c00SJeff Kirsher mk_tid_release(skb, p - td->tid_maps.tid_tab); 583f7917c00SJeff Kirsher cxgb3_ofld_send(tdev, skb); 584f7917c00SJeff Kirsher p->ctx = NULL; 585f7917c00SJeff Kirsher if (skb == td->nofail_skb) 586f7917c00SJeff Kirsher td->nofail_skb = 587f7917c00SJeff Kirsher alloc_skb(sizeof(struct cpl_tid_release), 588f7917c00SJeff Kirsher GFP_KERNEL); 589f7917c00SJeff Kirsher spin_lock_bh(&td->tid_release_lock); 590f7917c00SJeff Kirsher } 591f7917c00SJeff Kirsher td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1; 592f7917c00SJeff Kirsher spin_unlock_bh(&td->tid_release_lock); 593f7917c00SJeff Kirsher 594f7917c00SJeff Kirsher if (!td->nofail_skb) 595f7917c00SJeff Kirsher td->nofail_skb = 596f7917c00SJeff Kirsher alloc_skb(sizeof(struct cpl_tid_release), 597f7917c00SJeff Kirsher GFP_KERNEL); 598f7917c00SJeff Kirsher } 599f7917c00SJeff Kirsher 600f7917c00SJeff Kirsher /* use ctx as a next pointer in the tid release list */ 601f7917c00SJeff Kirsher void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid) 602f7917c00SJeff Kirsher { 603f7917c00SJeff Kirsher struct t3c_data *td = T3C_DATA(tdev); 604f7917c00SJeff Kirsher struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid]; 605f7917c00SJeff Kirsher 606f7917c00SJeff Kirsher spin_lock_bh(&td->tid_release_lock); 607f7917c00SJeff Kirsher p->ctx = (void *)td->tid_release_list; 608f7917c00SJeff Kirsher p->client = NULL; 609f7917c00SJeff Kirsher td->tid_release_list = p; 610f7917c00SJeff Kirsher if (!p->ctx || td->release_list_incomplete) 611f7917c00SJeff Kirsher schedule_work(&td->tid_release_task); 612f7917c00SJeff Kirsher spin_unlock_bh(&td->tid_release_lock); 613f7917c00SJeff Kirsher } 614f7917c00SJeff Kirsher 615f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb3_queue_tid_release); 616f7917c00SJeff Kirsher 617f7917c00SJeff Kirsher /* 618f7917c00SJeff Kirsher * Remove a tid from the TID 
table. A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	BUG_ON(tid >= t->ntids);
	if (tdev->type == T3A)
		/* T3A: clear the entry only if it still belongs to @ctx */
		(void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
	else {
		struct sk_buff *skb;

		/* other revisions must tell the HW to release the TID */
		skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
		if (likely(skb)) {
			mk_tid_release(skb, tid);
			cxgb3_ofld_send(tdev, skb);
			t->tid_tab[tid].ctx = NULL;
		} else
			/* no memory: defer the release to the workqueue path */
			cxgb3_queue_tid_release(tdev, tid);
	}
	atomic_dec(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_remove_tid);

/*
 * Allocate an active-open TID (atid) and bind it to @client/@ctx.
 * Returns the atid, or -1 if the free list is empty or too few hardware
 * TIDs would remain (the check keeps MC5_MIN_TIDS in reserve).
 */
int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int atid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->atid_lock);
	if (t->afree &&
	    t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
	    t->ntids) {
		union active_open_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}

EXPORT_SYMBOL(cxgb3_alloc_atid);

/*
 * Allocate a server (listening) TID (stid) and bind it to @client/@ctx.
 * Returns the stid or -1 if the free list is empty.
 */
int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int stid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->stid_lock);
	if (t->sfree) {
		union listen_entry *p = t->sfree;

		stid = (p - t->stid_tab) + t->stid_base;
		t->sfree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}

EXPORT_SYMBOL(cxgb3_alloc_stid);

/* Get the t3cdev associated with a net_device */
struct t3cdev *dev2t3cdev(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return (struct t3cdev *)pi->adapter;
}

EXPORT_SYMBOL(dev2t3cdev);

/* Consume a CPL_SMT_WRITE_RPL: only unexpected error statuses are logged. */
static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_smt_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

/* Consume a CPL_L2T_WRITE_RPL: only unexpected error statuses are logged. */
static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_l2t_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

/* Consume a CPL_RTE_WRITE_RPL: only unexpected error statuses are logged. */
static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_rte_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected RTE_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

/* Dispatch a CPL_ACT_OPEN_RPL to the client owning the atid, if any. */
static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = G_TID(ntohl(rpl->atid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
	    t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
								    t3c_tid->
								    ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_OPEN_RPL);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

/* Dispatch a server-TID CPL message to the client owning the stid. */
static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int stid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode] (dev, skb,
							     t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

/* Dispatch a hardware-TID CPL message to the client owning the tid. */
static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

/*
 * Handle a CPL_PASS_ACCEPT_REQ (incoming passive-open connection request):
 * route it to the client listening on the stid.  An out-of-range hardware
 * TID is treated as a fatal adapter error.
 */
static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		printk("%s: passive open TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_stid(t, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
		return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_PASS_ACCEPT_REQ);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

/*
 * Returns an sk_buff for a reply CPL message of size len.  If the input
 * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
 * is allocated.  The input skb must be of size at least len.  Note that this
 * operation does not destroy the original skb data even if it decides to reuse
 * the buffer.
 */
static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
					       gfp_t gfp)
{
	if (likely(!skb_cloned(skb))) {
		BUG_ON(skb->len < len);
		__skb_trim(skb, len);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (skb)
			__skb_put(skb, len);
	}
	return skb;
}

/*
 * Handle a CPL_ABORT_REQ_RSS.  If a client owns the tid the message is
 * dispatched to it; otherwise (clientless abort) we synthesize and send a
 * CPL_ABORT_RPL ourselves, except for negative-advice statuses which are
 * silently dropped.
 */
static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		struct cpl_abort_req_rss *req = cplhdr(skb);
		struct cpl_abort_rpl *rpl;
		struct sk_buff *reply_skb;
		unsigned int tid = GET_TID(req);
		u8 cmd = req->status;

		if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
		    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
			goto out;

		reply_skb = cxgb3_get_cpl_reply_skb(skb,
						    sizeof(struct
							   cpl_abort_rpl),
						    GFP_ATOMIC);

		if (!reply_skb) {
			printk("do_abort_req_rss: couldn't get skb!\n");
			goto out;
		}
		reply_skb->priority = CPL_PRIORITY_DATA;
		/*
		 * NOTE(review): cxgb3_get_cpl_reply_skb() already leaves the
		 * skb holding sizeof(struct cpl_abort_rpl) bytes in both its
		 * reuse and fresh-alloc paths, so this extra __skb_put looks
		 * redundant/doubling -- verify against upstream history.
		 */
		__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
		rpl = cplhdr(reply_skb);
		rpl->wr.wr_hi =
		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
		rpl->wr.wr_lo = htonl(V_WR_TID(tid));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
		rpl->cmd = cmd;
		cxgb3_ofld_send(dev, reply_skb);
out:
		return CPL_RET_BUF_DONE;
	}
}

/*
 * Handle a CPL_ACT_ESTABLISH: the atid is carried in the tos_tid field.
 * An out-of-range hardware TID is treated as a fatal adapter error.
 */
static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		printk("%s: active establish TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_atid(t, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
		return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_ESTABLISH);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

/*
 * Handle a CPL_TRACE_PKT: strip the CPL header, tag the skb with a
 * pseudo protocol (0xffff) and feed it back into the network stack on
 * the low-level net_device.
 */
static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_trace_pkt *p = cplhdr(skb);

	skb->protocol = htons(0xffff);
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb_reset_mac_header(skb);
	netif_receive_skb(skb);
	return 0;
}

/*
 * That
skb would better have come from process_responses() where we abuse
 * ->priority and ->csum to carry our data.  NB: if we get to per-arch
 * ->csum, the things might get really interesting here.
 */

/* Extract the hardware TID smuggled in skb->priority (see comment above). */
static inline u32 get_hwtid(struct sk_buff *skb)
{
	return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
}

/* Extract the CPL opcode smuggled in skb->csum (see comment above). */
static inline u32 get_opcode(struct sk_buff *skb)
{
	return G_OPCODE(ntohl((__force __be32)skb->csum));
}

/* Dispatch a terminate message to the client owning the hardware TID. */
static int do_term(struct t3cdev *dev, struct sk_buff *skb)
{
	unsigned int hwtid = get_hwtid(skb);
	unsigned int opcode = get_opcode(skb);
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[opcode]) {
		return t3c_tid->client->handlers[opcode] (dev, skb,
							  t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

/*
 * Netevent notifier callback: propagate neighbour updates and route
 * redirects into the L2T / connection state.  Always returns 0.
 */
static int nb_callback(struct notifier_block *self, unsigned long event,
		       void *ctx)
{
	switch (event) {
	case (NETEVENT_NEIGH_UPDATE):{
			cxgb_neigh_update((struct neighbour *)ctx);
			break;
		}
	case (NETEVENT_REDIRECT):{
			struct netevent_redirect *nr = ctx;
			cxgb_redirect(nr->old, nr->old_neigh,
				      nr->new, nr->new_neigh);
			cxgb_neigh_update(nr->new_neigh);
			break;
		}
	default:
		break;
	}
	return 0;
}

static struct notifier_block nb = {
	.notifier_call = nb_callback
};

/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
	       *skb->data);
	return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}

/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table. A NULL handler may be supplied
 * to unregister an existing handler.
1005f7917c00SJeff Kirsher */ 1006f7917c00SJeff Kirsher void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h) 1007f7917c00SJeff Kirsher { 1008f7917c00SJeff Kirsher if (opcode < NUM_CPL_CMDS) 1009f7917c00SJeff Kirsher cpl_handlers[opcode] = h ? h : do_bad_cpl; 1010f7917c00SJeff Kirsher else 1011f7917c00SJeff Kirsher printk(KERN_ERR "T3C: handler registration for " 1012f7917c00SJeff Kirsher "opcode %x failed\n", opcode); 1013f7917c00SJeff Kirsher } 1014f7917c00SJeff Kirsher 1015f7917c00SJeff Kirsher EXPORT_SYMBOL(t3_register_cpl_handler); 1016f7917c00SJeff Kirsher 1017f7917c00SJeff Kirsher /* 1018f7917c00SJeff Kirsher * T3CDEV's receive method. 1019f7917c00SJeff Kirsher */ 1020f7917c00SJeff Kirsher static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n) 1021f7917c00SJeff Kirsher { 1022f7917c00SJeff Kirsher while (n--) { 1023f7917c00SJeff Kirsher struct sk_buff *skb = *skbs++; 1024f7917c00SJeff Kirsher unsigned int opcode = get_opcode(skb); 1025f7917c00SJeff Kirsher int ret = cpl_handlers[opcode] (dev, skb); 1026f7917c00SJeff Kirsher 1027f7917c00SJeff Kirsher #if VALIDATE_TID 1028f7917c00SJeff Kirsher if (ret & CPL_RET_UNKNOWN_TID) { 1029f7917c00SJeff Kirsher union opcode_tid *p = cplhdr(skb); 1030f7917c00SJeff Kirsher 1031f7917c00SJeff Kirsher printk(KERN_ERR "%s: CPL message (opcode %u) had " 1032f7917c00SJeff Kirsher "unknown TID %u\n", dev->name, opcode, 1033f7917c00SJeff Kirsher G_TID(ntohl(p->opcode_tid))); 1034f7917c00SJeff Kirsher } 1035f7917c00SJeff Kirsher #endif 1036f7917c00SJeff Kirsher if (ret & CPL_RET_BUF_DONE) 1037f7917c00SJeff Kirsher kfree_skb(skb); 1038f7917c00SJeff Kirsher } 1039f7917c00SJeff Kirsher return 0; 1040f7917c00SJeff Kirsher } 1041f7917c00SJeff Kirsher 1042f7917c00SJeff Kirsher /* 1043f7917c00SJeff Kirsher * Sends an sk_buff to a T3C driver after dealing with any active network taps. 
 */
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
{
	int r;

	/* dev->send expects BHs off; returns its status unchanged. */
	local_bh_disable();
	r = dev->send(dev, skb);
	local_bh_enable();
	return r;
}

EXPORT_SYMBOL(cxgb3_ofld_send);

/* Return 1 iff @dev is a port of one of the registered cxgb3 adapters. */
static int is_offloading(struct net_device *dev)
{
	struct adapter *adapter;
	int i;

	read_lock_bh(&adapter_list_lock);
	list_for_each_entry(adapter, &adapter_list, adapter_list) {
		for_each_port(adapter, i) {
			if (dev == adapter->port[i]) {
				read_unlock_bh(&adapter_list_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&adapter_list_lock);
	return 0;
}

/* Push a neighbour update into the L2 table if it concerns one of our ports. */
static void cxgb_neigh_update(struct neighbour *neigh)
{
	struct net_device *dev;

	if (!neigh)
		return;
	dev = neigh->dev;
	if (dev && (is_offloading(dev))) {
		struct t3cdev *tdev = dev2t3cdev(dev);

		BUG_ON(!tdev);
		t3_l2t_update(tdev, neigh);
	}
}

/*
 * Build and send a CPL_SET_TCB_FIELD that rewrites connection @tid's
 * L2T index to @e->idx.  Best effort: allocation failure is only logged.
 */
static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: cannot allocate skb!\n", __func__);
		return;
	}
	skb->priority = CPL_PRIORITY_CONTROL;
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_L2T_IX);
	req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
	req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
	tdev->send(tdev, skb);
}

/*
 * Handle a route redirect from @old to @new.  Only redirects that stay on
 * the same offload device are honoured: a fresh L2T entry is allocated for
 * the new path, every client owning a TID is notified, and connections whose
 * client requests it get their TCB L2T index rewritten.
 */
static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
			  struct dst_entry *new, struct neighbour *new_neigh)
{
	struct net_device *olddev, *newdev;
	struct tid_info *ti;
	struct t3cdev *tdev;
	u32 tid;
	int update_tcb;
	struct l2t_entry *e;
	struct t3c_tid_entry *te;

	olddev = old_neigh->dev;
	newdev = new_neigh->dev;

	if (!is_offloading(olddev))
		return;
	if (!is_offloading(newdev)) {
		printk(KERN_WARNING "%s: Redirect to non-offload "
		       "device ignored.\n", __func__);
		return;
	}
	tdev = dev2t3cdev(olddev);
	BUG_ON(!tdev);
	if (tdev != dev2t3cdev(newdev)) {
		printk(KERN_WARNING "%s: Redirect to different "
		       "offload device ignored.\n", __func__);
		return;
	}

	/* Add new L2T entry */
	e = t3_l2t_get(tdev, new, newdev);
	if (!e) {
		printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
		       __func__);
		return;
	}

	/* Walk tid table and notify clients of dst change. */
	ti = &(T3C_DATA(tdev))->tid_maps;
	for (tid = 0; tid < ti->ntids; tid++) {
		te = lookup_tid(ti, tid);
		BUG_ON(!te);
		if (te && te->ctx && te->client && te->client->redirect) {
			update_tcb = te->client->redirect(te->ctx, old, new, e);
			if (update_tcb) {
				/* take an extra ref for the connection now
				 * using @e; L2DATA() is RCU-protected */
				rcu_read_lock();
				l2t_hold(L2DATA(tdev), e);
				rcu_read_unlock();
				set_l2t_ix(tdev, tid, e);
			}
		}
	}
	/* drop the reference taken by t3_l2t_get() */
	l2t_release(tdev, e);
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *cxgb_alloc_mem(unsigned long size)
{
	void *p = kzalloc(size, GFP_KERNEL);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through t3_alloc_mem().
 */
void cxgb_free_mem(void *addr)
{
	/* picks vfree()/kfree() to match whichever allocator succeeded */
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

/*
 * Allocate and initialize the TID tables. Returns 0 on success.
 */
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
			 unsigned int natids, unsigned int nstids,
			 unsigned int atid_base, unsigned int stid_base)
{
	/* all three tables live in one zeroed allocation, laid out as
	 * tid_tab[ntids] | stid_tab[nstids] | atid_tab[natids] */
	unsigned long size = ntids * sizeof(*t->tid_tab) +
	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

	t->tid_tab = cxgb_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
	t->ntids = ntids;
	t->nstids = nstids;
	t->stid_base = stid_base;
	t->sfree = NULL;
	t->natids = natids;
	t->atid_base = atid_base;
	t->afree = NULL;
	t->stids_in_use = t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	/*
	 * Setup the free lists for stid_tab and atid_tab.
	 * (The last entry's ->next stays NULL from the zeroed allocation.)
	 */
	if (nstids) {
		while (--nstids)
			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
		t->sfree = t->stid_tab;
	}
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	return 0;
}

/* Release the single allocation backing all three TID tables. */
static void free_tid_maps(struct tid_info *t)
{
	cxgb_free_mem(t->tid_tab);
}

static inline void add_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_add_tail(&adap->adapter_list, &adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

static inline void remove_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_del(&adap->adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

/*
 * Bring up offload support for @adapter: query the device limits via its
 * ctl hook, allocate the L2 table and TID maps, install the receive and
 * neighbour-update callbacks, and register the netevent notifier for the
 * first adapter.  Returns 0 on success or a negative errno.
 */
int cxgb3_offload_activate(struct adapter *adapter)
{
	struct t3cdev *dev = &adapter->tdev;
	int natids, err;
	struct t3c_data *t;
	struct tid_range stid_range, tid_range;
	struct mtutab mtutab;
	unsigned int l2t_capacity;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	err = -EOPNOTSUPP;
	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
		goto out_free;

	err = -ENOMEM;
	RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity));
	if (!L2DATA(dev))
		goto out_free;

	/* use at most half the TID space for active opens */
	natids = min(tid_range.num / 2, MAX_ATIDS);
	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
			    stid_range.num, ATID_BASE, stid_range.base);
	if (err)
		goto out_free_l2t;

	t->mtus = mtutab.mtus;
	t->nmtus = mtutab.size;

	INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
	spin_lock_init(&t->tid_release_lock);
	INIT_LIST_HEAD(&t->list_node);
	t->dev = dev;

	T3C_DATA(dev) = t;
	dev->recv = process_rx;
	dev->neigh_update = t3_l2t_update;

	/* Register netevent handler once */
	if (list_empty(&adapter_list))
		register_netevent_notifier(&nb);

	/*
	 * NOTE(review): failure of this allocation is not checked here;
	 * verify that later users of t->nofail_skb tolerate NULL.
	 */
	t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
	t->release_list_incomplete = 0;

	add_adapter(adapter);
	return 0;

out_free_l2t:
	t3_free_l2t(L2DATA(dev));
	RCU_INIT_POINTER(dev->l2opt, NULL);
out_free:
	kfree(t);
	return err;
}

/* RCU callback: free the L2 table once all readers are done with it. */
static void clean_l2_data(struct rcu_head *head)
{
	struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
	t3_free_l2t(d);
}


/*
 * Tear down offload support for @adapter: undo cxgb3_offload_activate(),
 * deferring the L2 table free to an RCU grace period.
 */
void cxgb3_offload_deactivate(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;
	struct t3c_data *t = T3C_DATA(tdev);
	struct l2t_data *d;

	remove_adapter(adapter);
	if (list_empty(&adapter_list))
		unregister_netevent_notifier(&nb);

	free_tid_maps(&t->tid_maps);
	T3C_DATA(tdev) = NULL;
	rcu_read_lock();
	d = L2DATA(tdev);
	rcu_read_unlock();
	RCU_INIT_POINTER(tdev->l2opt, NULL);
	call_rcu(&d->rcu_head, clean_l2_data);
	if (t->nofail_skb)
		kfree_skb(t->nofail_skb);
	kfree(t);
}

/* Give the t3cdev a unique "ofld_devN" name and add it to the device list. */
static inline void register_tdev(struct t3cdev *tdev)
{
	static int unit;

	mutex_lock(&cxgb3_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

static inline void unregister_tdev(struct t3cdev *tdev)
{
	mutex_lock(&cxgb3_db_lock);
	list_del(&tdev->ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

/* Map the adapter's hardware revision to the T3A/T3B/T3C device type. */
static inline int adap2type(struct adapter *adapter)
{
	int type = 0;

	switch (adapter->params.rev) {
	case T3_REV_A:
		type = T3A;
		break;
	case T3_REV_B:
	case T3_REV_B2:
		type = T3B;
		break;
	case T3_REV_C:
		type = T3C;
		break;
	}
	return type;
}

void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;
Kirsher 1386f7917c00SJeff Kirsher INIT_LIST_HEAD(&tdev->ofld_dev_list); 1387f7917c00SJeff Kirsher 1388f7917c00SJeff Kirsher cxgb3_set_dummy_ops(tdev); 1389f7917c00SJeff Kirsher tdev->send = t3_offload_tx; 1390f7917c00SJeff Kirsher tdev->ctl = cxgb_offload_ctl; 1391f7917c00SJeff Kirsher tdev->type = adap2type(adapter); 1392f7917c00SJeff Kirsher 1393f7917c00SJeff Kirsher register_tdev(tdev); 1394f7917c00SJeff Kirsher } 1395f7917c00SJeff Kirsher 1396f7917c00SJeff Kirsher void __devexit cxgb3_adapter_unofld(struct adapter *adapter) 1397f7917c00SJeff Kirsher { 1398f7917c00SJeff Kirsher struct t3cdev *tdev = &adapter->tdev; 1399f7917c00SJeff Kirsher 1400f7917c00SJeff Kirsher tdev->recv = NULL; 1401f7917c00SJeff Kirsher tdev->neigh_update = NULL; 1402f7917c00SJeff Kirsher 1403f7917c00SJeff Kirsher unregister_tdev(tdev); 1404f7917c00SJeff Kirsher } 1405f7917c00SJeff Kirsher 1406f7917c00SJeff Kirsher void __init cxgb3_offload_init(void) 1407f7917c00SJeff Kirsher { 1408f7917c00SJeff Kirsher int i; 1409f7917c00SJeff Kirsher 1410f7917c00SJeff Kirsher for (i = 0; i < NUM_CPL_CMDS; ++i) 1411f7917c00SJeff Kirsher cpl_handlers[i] = do_bad_cpl; 1412f7917c00SJeff Kirsher 1413f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl); 1414f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl); 1415f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl); 1416f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl); 1417f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl); 1418f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr); 1419f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl); 1420f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl); 1421f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl); 1422f7917c00SJeff Kirsher 
t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl); 1423f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl); 1424f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl); 1425f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl); 1426f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl); 1427f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl); 1428f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl); 1429f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss); 1430f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish); 1431f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl); 1432f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl); 1433f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term); 1434f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl); 1435f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_TRACE_PKT, do_trace); 1436f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl); 1437f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl); 1438f7917c00SJeff Kirsher t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl); 1439f7917c00SJeff Kirsher } 1440