/*
 * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <net/neighbour.h>
#include <linux/notifier.h>
#include <linux/atomic.h>
#include <linux/proc_fs.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include "common.h"
#include "regs.h"
#include "cxgb3_ioctl.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

static LIST_HEAD(client_list);
static LIST_HEAD(ofld_dev_list);
static DEFINE_MUTEX(cxgb3_db_lock);

static DEFINE_RWLOCK(adapter_list_lock);
static LIST_HEAD(adapter_list);

static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x10000;

static void cxgb_neigh_update(struct neighbour *neigh);
static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
			  struct dst_entry *new, struct neighbour *new_neigh);

static inline int offload_activated(struct t3cdev *tdev)
{
	const struct adapter *adapter = tdev2adap(tdev);

	return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
}

/**
 * cxgb3_register_client - register an offload client
 * @client: the client
 *
 * Adds the client to the client list and calls back the client for
 * each activated offload device.
 */
void cxgb3_register_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_add_tail(&client->client_list, &client_list);

	if (client->add) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->add(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_register_client);

/**
 * cxgb3_unregister_client - unregister an offload client
 * @client: the client
 *
 * Removes the client from the client list and calls back the client
 * for each activated offload device.
 */
void cxgb3_unregister_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_del(&client->client_list);

	if (client->remove) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->remove(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_unregister_client);
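
/*
 * Usage sketch (illustrative only, not part of this driver): a ULP module
 * would typically define a struct cxgb3_client with add/remove callbacks
 * and register it at module init.  The names my_client, my_dev_add and
 * my_dev_remove below are hypothetical.
 *
 *	static void my_dev_add(struct t3cdev *tdev)
 *	{
 *		// remember tdev, query parameters via tdev->ctl(), etc.
 *	}
 *
 *	static void my_dev_remove(struct t3cdev *tdev)
 *	{
 *		// drop any state associated with tdev
 *	}
 *
 *	static struct cxgb3_client my_client = {
 *		.add	= my_dev_add,
 *		.remove	= my_dev_remove,
 *	};
 *
 *	// in the module's init/exit paths:
 *	//	cxgb3_register_client(&my_client);
 *	//	cxgb3_unregister_client(&my_client);
 */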
/**
 * cxgb3_add_clients - activate registered clients for an offload device
 * @tdev: the offload device
 *
 * Calls back all registered clients when an offload device is activated.
 */
void cxgb3_add_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->add)
			client->add(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}

/**
 * cxgb3_remove_clients - deactivate registered clients for an
 * offload device
 * @tdev: the offload device
 *
 * Calls back all registered clients when an offload device is deactivated.
 */
void cxgb3_remove_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->remove)
			client->remove(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}

void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->event_handler)
			client->event_handler(tdev, event, port);
	}
	mutex_unlock(&cxgb3_db_lock);
}

static struct net_device *get_iff_from_mac(struct adapter *adapter,
					   const unsigned char *mac,
					   unsigned int vlan)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];

		if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
			if (vlan && vlan != VLAN_VID_MASK) {
				rcu_read_lock();
				dev = __vlan_find_dev_deep(dev, vlan);
				rcu_read_unlock();
			} else if (netif_is_bond_slave(dev)) {
				while (dev->master)
					dev = dev->master;
			}
			return dev;
		}
	}
	return NULL;
}

static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
			      void *data)
{
	int i;
	int ret = 0;
	unsigned int val = 0;
	struct ulp_iscsi_info *uiip = data;

	switch (req) {
	case ULP_ISCSI_GET_PARAMS:
		uiip->pdev = adapter->pdev;
		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);

		val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ);
		for (i = 0; i < 4; i++, val >>= 8)
			uiip->pgsz_factor[i] = val & 0xFF;

		val = t3_read_reg(adapter, A_TP_PARA_REG7);
		uiip->max_txsz =
		uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0)&M_PMMAXXFERLEN0,
				     (val >> S_PMMAXXFERLEN1)&M_PMMAXXFERLEN1);
		/*
		 * On tx, the iscsi pdu has to be <= tx page size and has to
		 * fit into the Tx PM FIFO.
		 */
		val = min(adapter->params.tp.tx_pg_size,
			  t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
		uiip->max_txsz = min(val, uiip->max_txsz);

		/* set MaxRxData to 16224 */
		val = t3_read_reg(adapter, A_TP_PARA_REG2);
		if ((val >> S_MAXRXDATA) != 0x3f60) {
			val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
			val |= V_MAXRXDATA(0x3f60);
			printk(KERN_INFO
			       "%s, iscsi set MaxRxData to 16224 (0x%x).\n",
			       adapter->name, val);
			t3_write_reg(adapter, A_TP_PARA_REG2, val);
		}

		/*
		 * On rx, the iscsi pdu has to be < rx page size and the
		 * max rx data length programmed in TP.
		 */
		val = min(adapter->params.tp.rx_pg_size,
			  ((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
			   S_MAXRXDATA) & M_MAXRXDATA);
		uiip->max_rxsz = min(val, uiip->max_rxsz);
		break;
	case ULP_ISCSI_SET_PARAMS:
		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
		/* program the ddp page sizes */
		for (i = 0; i < 4; i++)
			val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
		if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
			printk(KERN_INFO
			       "%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u.\n",
			       adapter->name, val, uiip->pgsz_factor[0],
			       uiip->pgsz_factor[1], uiip->pgsz_factor[2],
			       uiip->pgsz_factor[3]);
			t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
		}
		break;
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}

/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0

static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
{
	int ret = 0;

	switch (req) {
	case RDMA_GET_PARAMS: {
		struct rdma_info *rdma = data;
		struct pci_dev *pdev = adapter->pdev;

		rdma->udbell_physbase = pci_resource_start(pdev, 2);
		rdma->udbell_len = pci_resource_len(pdev, 2);
		rdma->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
		rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
		rdma->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
		rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
		rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
		rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
		rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
		rdma->pdev = pdev;
		break;
	}
	case RDMA_CQ_OP: {
		unsigned long flags;
		struct rdma_cq_op *rdma = data;

		/* may be called in any context */
		spin_lock_irqsave(&adapter->sge.reg_lock, flags);
		ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
					rdma->credits);
		spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
		break;
	}
	case RDMA_GET_MEM: {
		struct ch_mem_range *t = data;
		struct mc7 *mem;

		if ((t->addr & 7) || (t->len & 7))
			return -EINVAL;
		if (t->mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		ret = t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
				     (u64 *) t->buf);
		if (ret)
			return ret;
		break;
	}
	case RDMA_CQ_SETUP: {
		struct rdma_cq_setup *rdma = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_cqcntxt(adapter, rdma->id,
					  rdma->base_addr, rdma->size,
					  ASYNC_NOTIF_RSPQ,
					  rdma->ovfl_mode, rdma->credits,
					  rdma->credit_thres);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_CQ_DISABLE:
		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	case RDMA_CTRL_QP_SETUP: {
		struct rdma_ctrlqp_setup *rdma = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
					 SGE_CNTXT_RDMA,
					 ASYNC_NOTIF_RSPQ,
					 rdma->base_addr, rdma->size,
					 FW_RI_TID_START, 1, 0);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_GET_MIB: {
		spin_lock(&adapter->stats_lock);
		t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data);
		spin_unlock(&adapter->stats_lock);
		break;
	}
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}

static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct tid_range *tid;
	struct mtutab *mtup;
	struct iff_mac *iffmacp;
	struct ddp_params *ddpp;
	struct adap_ports *ports;
	struct ofld_page_info *rx_page_info;
	struct tp_params *tp = &adapter->params.tp;
	int i;

	switch (req) {
	case GET_MAX_OUTSTANDING_WR:
		*(unsigned int *)data = FW_WR_NUM;
		break;
	case GET_WR_LEN:
		*(unsigned int *)data = WR_FLITS;
		break;
	case GET_TX_MAX_CHUNK:
		*(unsigned int *)data = 1 << 20;	/* 1MB */
		break;
	case GET_TID_RANGE:
		tid = data;
		tid->num = t3_mc5_size(&adapter->mc5) -
		    adapter->params.mc5.nroutes -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
		tid->base = 0;
		break;
	case GET_STID_RANGE:
		tid = data;
		tid->num = adapter->params.mc5.nservers;
		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
		break;
	case GET_L2T_CAPACITY:
		*(unsigned int *)data = 2048;
		break;
	case GET_MTUS:
		mtup = data;
		mtup->size = NMTUS;
		mtup->mtus = adapter->params.mtus;
		break;
	case GET_IFF_FROM_MAC:
		iffmacp = data;
		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
						iffmacp->vlan_tag &
						VLAN_VID_MASK);
		break;
	case GET_DDP_PARAMS:
		ddpp = data;
		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
		break;
	case GET_PORTS:
		ports = data;
		ports->nports = adapter->params.nports;
		for_each_port(adapter, i)
			ports->lldevs[i] = adapter->port[i];
		break;
	case ULP_ISCSI_GET_PARAMS:
	case ULP_ISCSI_SET_PARAMS:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_ulp_iscsi_ctl(adapter, req, data);
	case RDMA_GET_PARAMS:
	case RDMA_CQ_OP:
	case RDMA_CQ_SETUP:
	case RDMA_CQ_DISABLE:
	case RDMA_CTRL_QP_SETUP:
	case RDMA_GET_MEM:
	case RDMA_GET_MIB:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_rdma_ctl(adapter, req, data);
	case GET_RX_PAGE_INFO:
		rx_page_info = data;
		rx_page_info->page_size = tp->rx_pg_size;
		rx_page_info->num = tp->rx_num_pgs;
		break;
	case GET_ISCSI_IPV4ADDR: {
		struct iscsi_ipv4addr *p = data;
		struct port_info *pi = netdev_priv(p->dev);

		p->ipv4addr = pi->iscsi_ipv4addr;
		break;
	}
	case GET_EMBEDDED_INFO: {
		struct ch_embedded_info *e = data;

		spin_lock(&adapter->stats_lock);
		t3_get_fw_version(adapter, &e->fw_vers);
		t3_get_tp_version(adapter, &e->tp_vers);
		spin_unlock(&adapter->stats_lock);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
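
/*
 * Query sketch (illustrative only): once a client's add() callback has a
 * t3cdev, it can query device parameters through the ->ctl hook handled
 * above.  The helper name my_query_tids is hypothetical.
 *
 *	static int my_query_tids(struct t3cdev *tdev)
 *	{
 *		struct tid_range tids;
 *		int err = tdev->ctl(tdev, GET_TID_RANGE, &tids);
 *
 *		if (err < 0)
 *			return err;
 *		pr_info("%s: %u TIDs starting at %u\n", tdev->name,
 *			tids.num, tids.base);
 *		return 0;
 *	}
 */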
/*
 * Dummy handler for Rx offload packets in case we get an offload packet before
 * proper processing is setup.  This complains and drops the packet as it isn't
 * normal to get offload packets at this stage.
 */
static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
				int n)
{
	while (n--)
		dev_kfree_skb_any(skbs[n]);
	return 0;
}

static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
{
}

void cxgb3_set_dummy_ops(struct t3cdev *dev)
{
	dev->recv = rx_offload_blackhole;
	dev->neigh_update = dummy_neigh_update;
}

/*
 * Free an active-open TID.
 */
void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union active_open_entry *p = atid2entry(t, atid);
	void *ctx = p->t3c_tid.ctx;

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);

	return ctx;
}

EXPORT_SYMBOL(cxgb3_free_atid);

/*
 * Free a server TID and return it to the free pool.
 */
void cxgb3_free_stid(struct t3cdev *tdev, int stid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union listen_entry *p = stid2entry(t, stid);

	spin_lock_bh(&t->stid_lock);
	p->next = t->sfree;
	t->sfree = p;
	t->stids_in_use--;
	spin_unlock_bh(&t->stid_lock);
}

EXPORT_SYMBOL(cxgb3_free_stid);

void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
		      void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	t->tid_tab[tid].client = client;
	t->tid_tab[tid].ctx = ctx;
	atomic_inc(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_insert_tid);

/*
 * Populate a TID_RELEASE WR.  The skb must be already properly sized.
 */
static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
{
	struct cpl_tid_release *req;

	skb->priority = CPL_PRIORITY_SETUP;
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

static void t3_process_tid_release_list(struct work_struct *work)
{
	struct t3c_data *td = container_of(work, struct t3c_data,
					   tid_release_task);
	struct sk_buff *skb;
	struct t3cdev *tdev = td->dev;

	spin_lock_bh(&td->tid_release_lock);
	while (td->tid_release_list) {
		struct t3c_tid_entry *p = td->tid_release_list;

		td->tid_release_list = p->ctx;
		spin_unlock_bh(&td->tid_release_lock);

		skb = alloc_skb(sizeof(struct cpl_tid_release),
				GFP_KERNEL);
		if (!skb)
			skb = td->nofail_skb;
		if (!skb) {
			spin_lock_bh(&td->tid_release_lock);
			p->ctx = (void *)td->tid_release_list;
			td->tid_release_list = p;
			break;
		}
		mk_tid_release(skb, p - td->tid_maps.tid_tab);
		cxgb3_ofld_send(tdev, skb);
		p->ctx = NULL;
		if (skb == td->nofail_skb)
			td->nofail_skb =
				alloc_skb(sizeof(struct cpl_tid_release),
					  GFP_KERNEL);
		spin_lock_bh(&td->tid_release_lock);
	}
	td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
	spin_unlock_bh(&td->tid_release_lock);

	if (!td->nofail_skb)
		td->nofail_skb =
			alloc_skb(sizeof(struct cpl_tid_release),
				  GFP_KERNEL);
}
/* use ctx as a next pointer in the tid release list */
void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
{
	struct t3c_data *td = T3C_DATA(tdev);
	struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];

	spin_lock_bh(&td->tid_release_lock);
	p->ctx = (void *)td->tid_release_list;
	p->client = NULL;
	td->tid_release_list = p;
	if (!p->ctx || td->release_list_incomplete)
		schedule_work(&td->tid_release_task);
	spin_unlock_bh(&td->tid_release_lock);
}

EXPORT_SYMBOL(cxgb3_queue_tid_release);

/*
 * Remove a tid from the TID table.  A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	BUG_ON(tid >= t->ntids);
	if (tdev->type == T3A)
		(void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
	else {
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
		if (likely(skb)) {
			mk_tid_release(skb, tid);
			cxgb3_ofld_send(tdev, skb);
			t->tid_tab[tid].ctx = NULL;
		} else
			cxgb3_queue_tid_release(tdev, tid);
	}
	atomic_dec(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_remove_tid);

int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int atid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->atid_lock);
	if (t->afree &&
	    t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
	    t->ntids) {
		union active_open_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}

EXPORT_SYMBOL(cxgb3_alloc_atid);

int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int stid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->stid_lock);
	if (t->sfree) {
		union listen_entry *p = t->sfree;

		stid = (p - t->stid_tab) + t->stid_base;
		t->sfree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}

EXPORT_SYMBOL(cxgb3_alloc_stid);

/* Get the t3cdev associated with a net_device */
struct t3cdev *dev2t3cdev(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return (struct t3cdev *)pi->adapter;
}

EXPORT_SYMBOL(dev2t3cdev);
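
/*
 * Lifecycle sketch (illustrative only): an active-open connection
 * typically allocates an atid before sending its open request and frees
 * it once the hardware TID takes over.  my_client and my_conn are
 * hypothetical client-private objects.
 *
 *	int atid = cxgb3_alloc_atid(tdev, &my_client, my_conn);
 *
 *	if (atid < 0)
 *		return -ENOMEM;		// atid pool exhausted
 *	// ... send the active-open request using atid ...
 *	// on CPL_ACT_ESTABLISH, move to the hardware tid:
 *	//	cxgb3_insert_tid(tdev, &my_client, my_conn, hwtid);
 *	//	cxgb3_free_atid(tdev, atid);
 */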
static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_smt_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_l2t_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_rte_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected RTE_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = G_TID(ntohl(rpl->atid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
	    t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_OPEN_RPL);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int stid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		printk(KERN_ERR "%s: passive open TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_stid(t, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
		return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_PASS_ACCEPT_REQ);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
/*
 * Returns an sk_buff for a reply CPL message of size len.  If the input
 * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
 * is allocated.  The input skb must be of size at least len.  Note that this
 * operation does not destroy the original skb data even if it decides to reuse
 * the buffer.
 */
static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
					       gfp_t gfp)
{
	if (likely(!skb_cloned(skb))) {
		BUG_ON(skb->len < len);
		__skb_trim(skb, len);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (skb)
			__skb_put(skb, len);
	}
	return skb;
}

static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		struct cpl_abort_req_rss *req = cplhdr(skb);
		struct cpl_abort_rpl *rpl;
		struct sk_buff *reply_skb;
		unsigned int tid = GET_TID(req);
		u8 cmd = req->status;

		if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
		    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
			goto out;

		reply_skb = cxgb3_get_cpl_reply_skb(skb,
						    sizeof(struct
							   cpl_abort_rpl),
						    GFP_ATOMIC);
		if (!reply_skb) {
			printk(KERN_ERR "do_abort_req_rss: couldn't get skb!\n");
			goto out;
		}
		reply_skb->priority = CPL_PRIORITY_DATA;
		/* the reply skb already has room for exactly one abort rpl */
		__skb_trim(reply_skb, sizeof(struct cpl_abort_rpl));
		rpl = cplhdr(reply_skb);
		rpl->wr.wr_hi =
		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
		rpl->wr.wr_lo = htonl(V_WR_TID(tid));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
		rpl->cmd = cmd;
		cxgb3_ofld_send(dev, reply_skb);
out:
		return CPL_RET_BUF_DONE;
	}
}

static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		printk(KERN_ERR "%s: active establish TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_atid(t, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
		return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_ESTABLISH);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_trace_pkt *p = cplhdr(skb);

	skb->protocol = htons(0xffff);
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb_reset_mac_header(skb);
	netif_receive_skb(skb);
	return 0;
}

/*
 * That skb would better have come from process_responses() where we abuse
 * ->priority and ->csum to carry our data.  NB: if we get to per-arch
 * ->csum, the things might get really interesting here.
 */
static inline u32 get_hwtid(struct sk_buff *skb)
{
	return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
}

static inline u32 get_opcode(struct sk_buff *skb)
{
	return G_OPCODE(ntohl((__force __be32)skb->csum));
}

static int do_term(struct t3cdev *dev, struct sk_buff *skb)
{
	unsigned int hwtid = get_hwtid(skb);
	unsigned int opcode = get_opcode(skb);
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[opcode]) {
		return t3c_tid->client->handlers[opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int nb_callback(struct notifier_block *self, unsigned long event,
		       void *ctx)
{
	switch (event) {
	case (NETEVENT_NEIGH_UPDATE): {
		cxgb_neigh_update((struct neighbour *)ctx);
		break;
	}
	case (NETEVENT_REDIRECT): {
		struct netevent_redirect *nr = ctx;

		cxgb_redirect(nr->old, nr->old_neigh,
			      nr->new, nr->new_neigh);
		cxgb_neigh_update(nr->new_neigh);
		break;
	}
	default:
		break;
	}
	return 0;
}

static struct notifier_block nb = {
	.notifier_call = nb_callback
};

/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
	       *skb->data);
	return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}

/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table.  A NULL handler may be supplied
 * to unregister an existing handler.
 */
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
	if (opcode < NUM_CPL_CMDS)
		cpl_handlers[opcode] = h ? h : do_bad_cpl;
	else
		printk(KERN_ERR "T3C: handler registration for "
		       "opcode %x failed\n", opcode);
}

EXPORT_SYMBOL(t3_register_cpl_handler);
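
/*
 * Handler sketch (illustrative only): a client module can hook a CPL
 * opcode of interest into the dispatch table above.  my_rx_data is a
 * hypothetical handler; returning CPL_RET_BUF_DONE tells process_rx()
 * below to free the skb.
 *
 *	static int my_rx_data(struct t3cdev *dev, struct sk_buff *skb)
 *	{
 *		// decode cplhdr(skb) and hand the payload to the ULP
 *		return CPL_RET_BUF_DONE;
 *	}
 *
 *	t3_register_cpl_handler(CPL_RX_DATA, my_rx_data);
 *	// ...
 *	t3_register_cpl_handler(CPL_RX_DATA, NULL);	// unregister
 */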
/*
 * T3CDEV's receive method.
 */
static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
{
	while (n--) {
		struct sk_buff *skb = *skbs++;
		unsigned int opcode = get_opcode(skb);
		int ret = cpl_handlers[opcode] (dev, skb);

#if VALIDATE_TID
		if (ret & CPL_RET_UNKNOWN_TID) {
			union opcode_tid *p = cplhdr(skb);

			printk(KERN_ERR "%s: CPL message (opcode %u) had "
			       "unknown TID %u\n", dev->name, opcode,
			       G_TID(ntohl(p->opcode_tid)));
		}
#endif
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);
	}
	return 0;
}

/*
 * Sends an sk_buff to a T3C driver after dealing with any active network taps.
 */
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
{
	int r;

	local_bh_disable();
	r = dev->send(dev, skb);
	local_bh_enable();
	return r;
}

EXPORT_SYMBOL(cxgb3_ofld_send);

static int is_offloading(struct net_device *dev)
{
	struct adapter *adapter;
	int i;

	read_lock_bh(&adapter_list_lock);
	list_for_each_entry(adapter, &adapter_list, adapter_list) {
		for_each_port(adapter, i) {
			if (dev == adapter->port[i]) {
				read_unlock_bh(&adapter_list_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&adapter_list_lock);
	return 0;
}

static void cxgb_neigh_update(struct neighbour *neigh)
{
	struct net_device *dev;

	if (!neigh)
		return;
	dev = neigh->dev;
	if (dev && (is_offloading(dev))) {
		struct t3cdev *tdev = dev2t3cdev(dev);

		BUG_ON(!tdev);
		t3_l2t_update(tdev, neigh);
	}
}

static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: cannot allocate skb!\n", __func__);
		return;
	}
	skb->priority = CPL_PRIORITY_CONTROL;
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_L2T_IX);
	req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
	req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
	tdev->send(tdev, skb);
}

static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
			  struct dst_entry *new, struct neighbour *new_neigh)
{
	struct net_device *olddev, *newdev;
	struct tid_info *ti;
	struct t3cdev *tdev;
	u32 tid;
	int update_tcb;
	struct l2t_entry *e;
	struct t3c_tid_entry *te;

	olddev = old_neigh->dev;
	newdev = new_neigh->dev;

	if (!is_offloading(olddev))
		return;
	if (!is_offloading(newdev)) {
		printk(KERN_WARNING "%s: Redirect to non-offload "
		       "device ignored.\n", __func__);
		return;
	}
	tdev = dev2t3cdev(olddev);
	BUG_ON(!tdev);
	if (tdev != dev2t3cdev(newdev)) {
		printk(KERN_WARNING "%s: Redirect to different "
		       "offload device ignored.\n", __func__);
		return;
	}

	/* Add new L2T entry */
	e = t3_l2t_get(tdev, new, newdev);
	if (!e) {
		printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
		       __func__);
		return;
	}

	/* Walk tid table and notify clients of dst change. */
	ti = &(T3C_DATA(tdev))->tid_maps;
	for (tid = 0; tid < ti->ntids; tid++) {
		te = lookup_tid(ti, tid);
		BUG_ON(!te);
		if (te && te->ctx && te->client && te->client->redirect) {
			update_tcb = te->client->redirect(te->ctx, old, new, e);
			if (update_tcb) {
				rcu_read_lock();
				l2t_hold(L2DATA(tdev), e);
				rcu_read_unlock();
				set_l2t_ix(tdev, tid, e);
			}
		}
	}
	l2t_release(tdev, e);
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *cxgb_alloc_mem(unsigned long size)
{
	void *p = kzalloc(size, GFP_KERNEL);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through cxgb_alloc_mem().
 */
void cxgb_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
			 unsigned int natids, unsigned int nstids,
			 unsigned int atid_base, unsigned int stid_base)
{
	unsigned long size = ntids * sizeof(*t->tid_tab) +
	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

	t->tid_tab = cxgb_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
	t->ntids = ntids;
	t->nstids = nstids;
	t->stid_base = stid_base;
	t->sfree = NULL;
	t->natids = natids;
	t->atid_base = atid_base;
	t->afree = NULL;
	t->stids_in_use = t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	/*
	 * Setup the free lists for stid_tab and atid_tab.
	 */
	if (nstids) {
		while (--nstids)
			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
		t->sfree = t->stid_tab;
	}
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	return 0;
}

static void free_tid_maps(struct tid_info *t)
{
	cxgb_free_mem(t->tid_tab);
}

static inline void add_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_add_tail(&adap->adapter_list, &adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

static inline void remove_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_del(&adap->adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

int cxgb3_offload_activate(struct adapter *adapter)
{
	struct t3cdev *dev = &adapter->tdev;
	int natids, err;
	struct t3c_data *t;
	struct tid_range stid_range, tid_range;
	struct mtutab mtutab;
	unsigned int l2t_capacity;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	err = -EOPNOTSUPP;
	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
		goto out_free;

	err = -ENOMEM;
	RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity));
	if (!L2DATA(dev))
		goto out_free;

	natids = min(tid_range.num / 2, MAX_ATIDS);
	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
			    stid_range.num, ATID_BASE, stid_range.base);
	if (err)
		goto out_free_l2t;

	t->mtus = mtutab.mtus;
	t->nmtus = mtutab.size;

	INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
	spin_lock_init(&t->tid_release_lock);
	INIT_LIST_HEAD(&t->list_node);
	t->dev = dev;

	T3C_DATA(dev) = t;
	dev->recv = process_rx;
	dev->neigh_update = t3_l2t_update;

	/* Register netevent handler once */
	if (list_empty(&adapter_list))
		register_netevent_notifier(&nb);

	t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
	t->release_list_incomplete = 0;

	add_adapter(adapter);
	return 0;

out_free_l2t:
	t3_free_l2t(L2DATA(dev));
	RCU_INIT_POINTER(dev->l2opt, NULL);
out_free:
	kfree(t);
	return err;
}

static void clean_l2_data(struct rcu_head *head)
{
	struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
	t3_free_l2t(d);
}


void cxgb3_offload_deactivate(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;
	struct t3c_data *t = T3C_DATA(tdev);
	struct l2t_data *d;

	remove_adapter(adapter);
	if (list_empty(&adapter_list))
		unregister_netevent_notifier(&nb);

	free_tid_maps(&t->tid_maps);
	T3C_DATA(tdev) = NULL;
	rcu_read_lock();
	d = L2DATA(tdev);
	rcu_read_unlock();
	RCU_INIT_POINTER(tdev->l2opt, NULL);
	call_rcu(&d->rcu_head, clean_l2_data);
	if (t->nofail_skb)
		kfree_skb(t->nofail_skb);
	kfree(t);
}

static inline void register_tdev(struct t3cdev *tdev)
{
	static int unit;

	mutex_lock(&cxgb3_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

static inline void unregister_tdev(struct t3cdev *tdev)
{
	mutex_lock(&cxgb3_db_lock);
	list_del(&tdev->ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

static inline int adap2type(struct adapter *adapter)
{
	int type = 0;

	switch (adapter->params.rev) {
	case T3_REV_A:
		type = T3A;
		break;
	case T3_REV_B:
	case T3_REV_B2:
		type = T3B;
		break;
	case T3_REV_C:
		type = T3C;
		break;
	}
	return type;
}

void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	INIT_LIST_HEAD(&tdev->ofld_dev_list);

	cxgb3_set_dummy_ops(tdev);
	tdev->send = t3_offload_tx;
	tdev->ctl = cxgb_offload_ctl;
	tdev->type = adap2type(adapter);

	register_tdev(tdev);
}

void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	tdev->recv = NULL;
	tdev->neigh_update = NULL;

	unregister_tdev(tdev);
}

void __init cxgb3_offload_init(void)
{
	int i;

	for (i = 0; i < NUM_CPL_CMDS; ++i)
		cpl_handlers[i] = do_bad_cpl;

	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
	t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
}