/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "sched.h"
#include "cxgb4_tc_u32.h"

char cxgb4_driver_name[] = KBUILD_MODNAME;

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
        static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
        {PCI_VDEVICE(CHELSIO, (devid)), 4}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
                { 0, } \
        }

#include "t4_pci_id_tbl.h"

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries. This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary. And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA. However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
                 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
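/*
 * Example (hypothetical invocation, values chosen for illustration): to load
 * the driver restricted to MSI/INTx and with the driver's own Tx queue
 * selection policy:
 *
 *      modprobe cxgb4 msi=1 select_queue=1
 */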
static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);

static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s;
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case 100:
                        s = "100Mbps";
                        break;
                case 1000:
                        s = "1Gbps";
                        break;
                case 10000:
                        s = "10Gbps";
                        break;
                case 25000:
                        s = "25Gbps";
                        break;
                case 40000:
                        s = "40Gbps";
                        break;
                case 100000:
                        s = "100Gbps";
                        break;
                default:
                        pr_info("%s: unsupported speed: %d\n",
                                dev->name, p->link_cfg.speed);
                        return;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
        int i;

        /* We use a simple mapping of Port TX Queue Index to DCB
         * Priority when we're enabling DCB.
         */
        for (i = 0; i < pi->nqsets; i++, txq++) {
                u32 name, value;
                int err;

                name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                        FW_PARAMS_PARAM_X_V(
                                FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
                        FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
                value = enable ? i : 0xffffffff;

                /* Since we can be called while atomic (from "interrupt
                 * level") we need to issue the Set Parameters Command
                 * without sleeping (timeout < 0).
                 */
                err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
                                            &name, &value,
                                            -FW_CMD_MAX_TIMEOUT);

                if (err)
                        dev_err(adap->pdev_dev,
                                "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
                                enable ? "set" : "unset", pi->port_id, i, -err);
                else
                        txq->dcb_prio = value;
        }
}

static int cxgb4_dcb_enabled(const struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);

        if (!pi->dcb.enabled)
                return 0;

        return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
                (pi->dcb.state == CXGB4_DCB_STATE_HOST));
}
#endif /* CONFIG_CHELSIO_T4_DCB */
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else {
#ifdef CONFIG_CHELSIO_T4_DCB
                        if (cxgb4_dcb_enabled(dev)) {
                                cxgb4_dcb_state_init(dev);
                                dcb_tx_queue_prio_enable(dev, false);
                        }
#endif /* CONFIG_CHELSIO_T4_DCB */
                        netif_carrier_off(dev);
                }

                link_report(dev);
        }
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
                netdev_info(dev, "%s: unsupported port module inserted\n",
                            dev->name);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
                netdev_info(dev, "%s: unknown port module inserted\n",
                            dev->name);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
                netdev_info(dev, "%s: transceiver module error\n", dev->name);
        else
                netdev_info(dev, "%s: unknown module type %d inserted\n",
                            dev->name, pi->mod_type);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
                 "usecs to sleep while draining the dbfifo");

static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
        struct adapter *adap = pi->adapter;
        u64 vec = 0;
        bool ucast = false;
        struct hash_mac_addr *entry;

        /* Calculate the hash vector for the updated list and program it */
        list_for_each_entry(entry, &adap->mac_hlist, list) {
                ucast |= is_unicast_ether_addr(entry->addr);
                vec |= (1ULL << hash_mac_addr(entry->addr));
        }
        return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
                                vec, false);
}
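/*
 * A brief sketch of the hash fallback (based on the code above and below):
 * when the exact-match MAC filter allocation cannot take an address,
 * cxgb4_mac_sync() records it in adap->mac_hlist, and each tracked address
 * then contributes one bit to the 64-bit vector above.  For example
 * (hypothetical hash value), an address with hash_mac_addr(addr) == 13 sets
 * bit 13 of @vec.  Distinct addresses may hash to the same bit, so hash
 * matches are a superset of the tracked list.
 */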
static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
        struct port_info *pi = netdev_priv(netdev);
        struct adapter *adap = pi->adapter;
        int ret;
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = false;
        bool ucast = is_unicast_ether_addr(mac_addr);
        const u8 *maclist[1] = {mac_addr};
        struct hash_mac_addr *new_entry;

        ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
                                NULL, ucast ? &uhash : &mhash, false);
        if (ret < 0)
                goto out;
        /* if hash != 0, then add the addr to hash addr list
         * so on the end we will calculate the hash for the
         * list and program it
         */
        if (uhash || mhash) {
                new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
                if (!new_entry)
                        return -ENOMEM;
                ether_addr_copy(new_entry->addr, mac_addr);
                list_add_tail(&new_entry->list, &adap->mac_hlist);
                ret = cxgb4_set_addr_hash(pi);
        }
out:
        return ret < 0 ? ret : 0;
}

static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
        struct port_info *pi = netdev_priv(netdev);
        struct adapter *adap = pi->adapter;
        int ret;
        const u8 *maclist[1] = {mac_addr};
        struct hash_mac_addr *entry, *tmp;

        /* If the MAC address to be removed is in the hash addr
         * list, delete it from the list and update hash vector
         */
        list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
                if (ether_addr_equal(entry->addr, mac_addr)) {
                        list_del(&entry->list);
                        kfree(entry);
                        return cxgb4_set_addr_hash(pi);
                }
        }

        ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
        return ret < 0 ? -EINVAL : 0;
}

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
        __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
                             (dev->flags & IFF_PROMISC) ? 1 : 0,
                             (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                             sleep_ok);
}

/**
 *      link_start - enable a port
 *      @dev: the port to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->pf;

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0) {
                local_bh_disable();
                ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
                                          true, CXGB4_DCB_ENABLED);
                local_bh_enable();
        }

        return ret;
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
        int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
        struct net_device *dev = adap->port[adap->chan_map[port]];
        int old_dcb_enabled = cxgb4_dcb_enabled(dev);
        int new_dcb_enabled;

        cxgb4_dcb_handle_fw_update(adap, pcmd);
        new_dcb_enabled = cxgb4_dcb_enabled(dev);

        /* If the DCB has become enabled or disabled on the port then we're
         * going to need to set up/tear down DCB Priority parameters for the
         * TX Queues associated with the port.
         */
        if (new_dcb_enabled != old_dcb_enabled)
                dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */

        /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
         */
        if (unlikely(opcode == CPL_FW4_MSG &&
                     ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
                rsp++;
                opcode = ((const struct rss_header *)rsp)->opcode;
                rsp++;
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(q->adap->pdev_dev,
                                "unexpected FW4/CPL %#x on FW event queue\n",
                                opcode);
                        goto out;
                }
        }

        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
                struct sge_txq *txq;

                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                txq->restarts++;
                if (txq->q_type == CXGB4_TXQ_ETH) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_uld_txq *oq;

                        oq = container_of(txq, struct sge_uld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
                const struct fw_port_cmd *pcmd = (const void *)p->data;
                unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
                unsigned int action =
                        FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

                if (cmd == FW_PORT_CMD &&
                    action == FW_PORT_ACTION_GET_PORT_INFO) {
                        int port = FW_PORT_CMD_PORTID_G(
                                        be32_to_cpu(pcmd->op_to_portid));
                        struct net_device *dev =
                                q->adap->port[q->adap->chan_map[port]];
                        int state_input = ((pcmd->u.info.dcbxdis_pkd &
                                            FW_PORT_CMD_DCBXDIS_F)
                                           ? CXGB4_DCB_INPUT_FW_DISABLED
                                           : CXGB4_DCB_INPUT_FW_ENABLED);

                        cxgb4_dcb_state_fsm(dev, state_input);
                }

                if (cmd == FW_PORT_CMD &&
                    action == FW_PORT_ACTION_L2_DCB_CFG)
                        dcb_rpl(q->adap, pcmd);
                else
#endif
                        if (p->type == 0)
                                t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else if (opcode == CPL_SET_TCB_RPL) {
                const struct cpl_set_tcb_rpl *p = (void *)rsp;

                filter_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
out:
        return 0;
}

static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;
        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

        if (v & PFSW_F) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
        }
        if (adap->flags & MASTER_PF)
                t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

        /* FW events */
        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
                 adap->port[0]->name);

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++)
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
        }
}

static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx;
        int msi_index = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        return 0;

unwind:
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi_index = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
}

/**
 *      cxgb4_write_rss - write the RSS table for a given port
 *      @pi: the port
 *      @queues: array of queue indices for RSS
 *
 *      Sets up the portion of the HW RSS table for the port's VI to distribute
 *      packets to the Rx queues in @queues.
 *      Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        struct adapter *adapter = pi->adapter;
        const struct sge_eth_rxq *rxq;

        rxq = &adapter->sge.ethrxq[pi->first_qset];
        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = rxq[*queues].rspq.abs_id;

        err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        /* If Tunnel All Lookup isn't specified in the global RSS
         * Configuration, then we need to specify a default Ingress
         * Queue for any ingress packets which aren't hashed.  We'll
         * use our first ingress queue ...
         */
        if (!err)
                err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
                                       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
                                       rss[0]);
        kfree(rss);
        return err;
}
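/*
 * A small worked example of the default RSS fill below (hypothetical sizes):
 * with pi->rss_size == 64 and pi->nqsets == 4, setup_rss() writes
 * pi->rss[j] = j % 4, so RSS slots 0, 4, 8, ... steer to the port's Rx queue
 * set 0, slots 1, 5, 9, ... to queue set 1, and so on.  cxgb4_write_rss()
 * above then translates those per-port queue indices into absolute
 * response-queue IDs before programming the hardware.
 */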
/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, j, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                /* Fill default values with equal distribution */
                for (j = 0; j < pi->rss_size; j++)
                        pi->rss[j] = j % pi->nqsets;

                err = cxgb4_write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
        qid -= p->ingr_start;
        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < adap->sge.ingr_sz; i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler)
                        napi_disable(&q->napi);
        }
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
        if (adap->flags & FULL_INIT_DONE) {
                t4_intr_disable(adap);
                if (adap->flags & USING_MSIX) {
                        free_msix_queue_irqs(adap);
                        free_irq(adap->msix_info[0].vec, adap);
                } else {
                        free_irq(adap->pdev->irq, adap);
                }
                quiesce_rx(adap);
        }
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < adap->sge.ingr_sz; i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler)
                        napi_enable(&q->napi);

                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
                             SEINTARM_V(q->intr_params) |
                             INGRESSQID_V(q->cntxt_id));
        }
}

static int setup_fw_sge_queues(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err = 0;

        bitmap_zero(s->starving_fl, s->egr_sz);
        bitmap_zero(s->txq_maperr, s->egr_sz);

        if (adap->flags & USING_MSIX)
                adap->msi_idx = 1;      /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL, NULL, -1);
                if (err)
                        return err;
                adap->msi_idx = -((int)s->intrq.abs_id + 1);
        }

        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
        if (err)
                t4_free_sge_resources(adap);
        return err;
}
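/*
 * For reference (hypothetical two-port example): with MSI-X, vector 0 carries
 * non-data interrupts and vector 1 the FW event queue, as set up in
 * name_msix_vecs() and cxgb_up().  Per-queue vectors start at index 2, so an
 * adapter with 4 queue sets on each of two ports would use vectors 2-5 for
 * port 0's Rx queues and 6-9 for port 1's.
 */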
/**
 *      setup_sge_queues - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        int err, i, j;
        struct sge *s = &adap->sge;
        struct sge_uld_rxq_info *rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
        unsigned int cmplqid = 0;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (adap->msi_idx > 0)
                                adap->msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               adap->msi_idx, &q->fl,
                                               t4_ethrx_handler,
                                               NULL,
                                               t4_get_mps_bg_map(adap,
                                                                 pi->tx_chan));
                        if (err)
                                goto freeout;
                        q->rspq.idx = j;
                        memset(&q->stats, 0, sizeof(q->stats));
                }
                for (j = 0; j < pi->nqsets; j++, t++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                                   netdev_get_tx_queue(dev, j),
                                                   s->fw_evtq.cntxt_id);
                        if (err)
                                goto freeout;
                }
        }

        for_each_port(adap, i) {
                /* Note that cmplqid below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                if (rxq_info)
                        cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;

                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id, cmplqid);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, is_t4(adap->params.chip) ?
                                MPS_TRC_RSS_CONTROL_A :
                                MPS_T5_TRC_RSS_CONTROL_A,
                     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
        return 0;
freeout:
        t4_free_sge_resources(adap);
        return err;
}
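/*
 * Illustration of the select_queue=1 policy implemented below (hypothetical
 * numbers): if a forwarded skb recorded Rx queue 5 and the device exposes
 * 4 Tx queues, the reduction loop below maps 5 modulo 4 onto Tx queue 1;
 * locally generated traffic (no recorded Rx queue) is instead keyed off the
 * submitting CPU ID.
 */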
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
                             void *accel_priv, select_queue_fallback_t fallback)
{
        int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
        /* If Data Center Bridging has been successfully negotiated on this
         * link then we'll use the skb's priority to map it to a TX Queue.
         * The skb's priority is determined via the VLAN Tag Priority Code
         * Point field.
         */
        if (cxgb4_dcb_enabled(dev)) {
                u16 vlan_tci;
                int err;

                err = vlan_get_tag(skb, &vlan_tci);
                if (unlikely(err)) {
                        if (net_ratelimit())
                                netdev_warn(dev,
                                            "TX Packet without VLAN Tag on DCB Link\n");
                        txq = 0;
                } else {
                        txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
                        if (skb->protocol == htons(ETH_P_FCOE))
                                txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
                }
                return txq;
        }
#endif /* CONFIG_CHELSIO_T4_DCB */

        if (select_queue) {
                txq = (skb_rx_queue_recorded(skb)
                        ? skb_get_rx_queue(skb)
                        : smp_processor_id());

                while (unlikely(txq >= dev->real_num_tx_queues))
                        txq -= dev->real_num_tx_queues;

                return txq;
        }

        return fallback(dev, skb) % dev->real_num_tx_queues;
}

static int closest_timer(const struct sge *s, int time)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
                delta = time - s->timer_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

static int closest_thres(const struct sge *s, int thres)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
                delta = thres - s->counter_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

/**
 *      cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *      @q: the Rx queue
 *      @us: the hold-off time in us, or 0 to disable timer
 *      @cnt: the hold-off packet count, or 0 to disable counter
 *
 *      Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *      one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
                               unsigned int us, unsigned int cnt)
{
        struct adapter *adap = q->adap;

        if ((us | cnt) == 0)
                cnt = 1;

        if (cnt) {
                int err;
                u32 v, new_idx;

                new_idx = closest_thres(&adap->sge, cnt);
                if (q->desc && q->pktcnt_idx != new_idx) {
                        /* the queue has already been created, update it */
                        v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                            FW_PARAMS_PARAM_X_V(
                                        FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
                            FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
                        err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
                                            &v, &new_idx);
                        if (err)
                                return err;
                }
                q->pktcnt_idx = new_idx;
        }

        us = us == 0 ? 6 : closest_timer(&adap->sge, us);
        q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
        return 0;
}

static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
        const struct port_info *pi = netdev_priv(dev);
        netdev_features_t changed = dev->features ^ features;
        int err;

        if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
                return 0;

        err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
                            -1, -1, -1,
                            !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (unlikely(err))
                dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
        return err;
}

static int setup_debugfs(struct adapter *adap)
{
        if (IS_ERR_OR_NULL(adap->debugfs_root))
                return -1;

#ifdef CONFIG_DEBUG_FS
        t4_setup_debugfs(adap);
#endif
        return 0;
}

/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
        int atid = -1;

        spin_lock_bh(&t->atid_lock);
        if (t->afree) {
                union aopen_entry *p = t->afree;

                atid = (p - t->atid_tab) + t->atid_base;
                t->afree = p->next;
                p->data = data;
                t->atids_in_use++;
        }
        spin_unlock_bh(&t->atid_lock);
        return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);
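/*
 * Minimal usage sketch for the active-open TID pool (the caller-side names
 * are hypothetical):
 *
 *      int atid = cxgb4_alloc_atid(&adap->tids, my_conn);
 *      if (atid < 0)
 *              return -ENOMEM;         // pool exhausted
 *      ...                             // use atid in the active-open request
 *      cxgb4_free_atid(&adap->tids, atid);
 */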
/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
        union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

        spin_lock_bh(&t->atid_lock);
        p->next = t->afree;
        t->afree = p;
        t->atids_in_use--;
        spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);

/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
        int stid;

        spin_lock_bh(&t->stid_lock);
        if (family == PF_INET) {
                stid = find_first_zero_bit(t->stid_bmap, t->nstids);
                if (stid < t->nstids)
                        __set_bit(stid, t->stid_bmap);
                else
                        stid = -1;
        } else {
                stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
                if (stid < 0)
                        stid = -1;
        }
        if (stid >= 0) {
                t->stid_tab[stid].data = data;
                stid += t->stid_base;
                /* IPv6 requires max of 520 bits or 16 cells in TCAM
                 * This is equivalent to 4 TIDs. With CLIP enabled it
                 * needs 2 TIDs.
                 */
                if (family == PF_INET)
                        t->stids_in_use++;
                else
                        t->stids_in_use += 2;
        }
        spin_unlock_bh(&t->stid_lock);
        return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
        int stid;

        spin_lock_bh(&t->stid_lock);
        if (family == PF_INET) {
                stid = find_next_zero_bit(t->stid_bmap,
                                          t->nstids + t->nsftids, t->nstids);
                if (stid < (t->nstids + t->nsftids))
                        __set_bit(stid, t->stid_bmap);
                else
                        stid = -1;
        } else {
                stid = -1;
        }
        if (stid >= 0) {
                t->stid_tab[stid].data = data;
                stid -= t->nstids;
                stid += t->sftid_base;
                t->sftids_in_use++;
        }
        spin_unlock_bh(&t->stid_lock);
        return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);

/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
        /* Is it a server filter TID? */
        if (t->nsftids && (stid >= t->sftid_base)) {
                stid -= t->sftid_base;
                stid += t->nstids;
        } else {
                stid -= t->stid_base;
        }

        spin_lock_bh(&t->stid_lock);
        if (family == PF_INET)
                __clear_bit(stid, t->stid_bmap);
        else
                bitmap_release_region(t->stid_bmap, stid, 1);
        t->stid_tab[stid].data = NULL;
        if (stid < t->nstids) {
                if (family == PF_INET)
                        t->stids_in_use--;
                else
                        t->stids_in_use -= 2;
        } else {
                t->sftids_in_use--;
        }
        spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
                           unsigned int tid)
{
        struct cpl_tid_release *req;

        set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
        req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, tid);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
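/*
 * Note on the deferred-release list below: tid_tab entries are
 * pointer-aligned, so their low 2 bits are free to carry the Tx channel.
 * For example (hypothetical addresses), queueing the entry at 0x...e40 for
 * channel 2 stores 0x...e42 in tid_release_head; process_tid_release_list()
 * recovers chan = head & 3 and the entry address by subtracting chan again.
 */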
/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
                                    unsigned int tid)
{
        void **p = &t->tid_tab[tid];
        struct adapter *adap = container_of(t, struct adapter, tids);

        spin_lock_bh(&adap->tid_release_lock);
        *p = adap->tid_release_head;
        /* Low 2 bits encode the Tx channel number */
        adap->tid_release_head = (void **)((uintptr_t)p | chan);
        if (!adap->tid_release_task_busy) {
                adap->tid_release_task_busy = true;
                queue_work(adap->workq, &adap->tid_release_task);
        }
        spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
        struct sk_buff *skb;
        struct adapter *adap;

        adap = container_of(work, struct adapter, tid_release_task);

        spin_lock_bh(&adap->tid_release_lock);
        while (adap->tid_release_head) {
                void **p = adap->tid_release_head;
                unsigned int chan = (uintptr_t)p & 3;
                p = (void *)p - chan;

                adap->tid_release_head = *p;
                *p = NULL;
                spin_unlock_bh(&adap->tid_release_lock);

                while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
                                         GFP_KERNEL)))
                        schedule_timeout_uninterruptible(1);

                mk_tid_release(skb, chan, p - adap->tids.tid_tab);
                t4_ofld_send(adap, skb);
                spin_lock_bh(&adap->tid_release_lock);
        }
        adap->tid_release_task_busy = false;
        spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
        struct sk_buff *skb;
        struct adapter *adap = container_of(t, struct adapter, tids);

        WARN_ON(tid >= t->ntids);

        if (t->tid_tab[tid]) {
                t->tid_tab[tid] = NULL;
                if (t->hash_base && (tid >= t->hash_base))
                        atomic_dec(&t->hash_tids_in_use);
                else
                        atomic_dec(&t->tids_in_use);
        }

        skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
        if (likely(skb)) {
                mk_tid_release(skb, chan, tid);
                t4_ofld_send(adap, skb);
        } else
                cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
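/*
 * tid_init() below carves all of its tables out of a single allocation.
 * The layout, in order, is:
 *
 *      tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids + nsftids] |
 *      stid_bmap | ftid_tab[nftids + nsftids] | ftid_bmap
 *
 * so each table pointer is derived from the end of the previous one.
 */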
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
        struct adapter *adap = container_of(t, struct adapter, tids);
        unsigned int max_ftids = t->nftids + t->nsftids;
        unsigned int natids = t->natids;
        unsigned int stid_bmap_size;
        unsigned int ftid_bmap_size;
        size_t size;

        stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
        ftid_bmap_size = BITS_TO_LONGS(t->nftids);
        size = t->ntids * sizeof(*t->tid_tab) +
               natids * sizeof(*t->atid_tab) +
               t->nstids * sizeof(*t->stid_tab) +
               t->nsftids * sizeof(*t->stid_tab) +
               stid_bmap_size * sizeof(long) +
               max_ftids * sizeof(*t->ftid_tab) +
               ftid_bmap_size * sizeof(long);

        t->tid_tab = kvzalloc(size, GFP_KERNEL);
        if (!t->tid_tab)
                return -ENOMEM;

        t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
        t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
        t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
        t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
        t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
        spin_lock_init(&t->stid_lock);
        spin_lock_init(&t->atid_lock);
        spin_lock_init(&t->ftid_lock);

        t->stids_in_use = 0;
        t->sftids_in_use = 0;
        t->afree = NULL;
        t->atids_in_use = 0;
        atomic_set(&t->tids_in_use, 0);
        atomic_set(&t->hash_tids_in_use, 0);

        /* Setup the free list for atid_tab and clear the stid bitmap. */
        if (natids) {
                while (--natids)
                        t->atid_tab[natids - 1].next = &t->atid_tab[natids];
                t->afree = t->atid_tab;
        }

        if (is_offload(adap)) {
                bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
                /* Reserve stid 0 for T4/T5 adapters */
                if (!t->stid_base &&
                    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
                        __set_bit(0, t->stid_bmap);
        }

        bitmap_zero(t->ftid_bmap, t->nftids);
        return 0;
}
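/*
 * Hypothetical ULD flow for standing up an IPv4 listener with the stid and
 * server helpers in this file (error handling elided; my_ctx, sip, sport and
 * rxq_id are placeholders):
 *
 *      stid = cxgb4_alloc_stid(&adap->tids, PF_INET, my_ctx);
 *      ret = cxgb4_create_server(dev, stid, sip, sport, 0, rxq_id);
 *      ...
 *      cxgb4_remove_server(dev, stid, rxq_id, false);
 *      cxgb4_free_stid(&adap->tids, stid, PF_INET);
 */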
/**
 *      cxgb4_create_server - create an IP server
 *      @dev: the device
 *      @stid: the server TID
 *      @sip: local IP address to bind server to
 *      @sport: the server's TCP port
 *      @queue: queue to direct messages from this server to
 *
 *      Create an IP server for the given port and address.
 *      Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
                        __be32 sip, __be16 sport, __be16 vlan,
                        unsigned int queue)
{
        unsigned int chan;
        struct sk_buff *skb;
        struct adapter *adap;
        struct cpl_pass_open_req *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        adap = netdev2adap(dev);
        req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, 0);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
        req->local_port = sport;
        req->peer_port = htons(0);
        req->local_ip = sip;
        req->peer_ip = htonl(0);
        chan = rxq_to_chan(&adap->sge, queue);
        req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
        req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
                                SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
        ret = t4_mgmt_tx(adap, skb);
        return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);

/*      cxgb4_create_server6 - create an IPv6 server
 *      @dev: the device
 *      @stid: the server TID
 *      @sip: local IPv6 address to bind server to
 *      @sport: the server's TCP port
 *      @queue: queue to direct messages from this server to
 *
 *      Create an IPv6 server for the given port and address.
 *      Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
                         const struct in6_addr *sip, __be16 sport,
                         unsigned int queue)
{
        unsigned int chan;
        struct sk_buff *skb;
        struct adapter *adap;
        struct cpl_pass_open_req6 *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        adap = netdev2adap(dev);
        req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, 0);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
        req->local_port = sport;
        req->peer_port = htons(0);
        req->local_ip_hi = *(__be64 *)(sip->s6_addr);
        req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
        req->peer_ip_hi = cpu_to_be64(0);
        req->peer_ip_lo = cpu_to_be64(0);
        chan = rxq_to_chan(&adap->sge, queue);
        req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
        req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
                                SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
        ret = t4_mgmt_tx(adap, skb);
        return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);
int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
                        unsigned int queue, bool ipv6)
{
        struct sk_buff *skb;
        struct adapter *adap;
        struct cpl_close_listsvr_req *req;
        int ret;

        adap = netdev2adap(dev);

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, 0);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
        req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
                                LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
        ret = t4_mgmt_tx(adap, skb);
        return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);

/**
 *      cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *      @mtus: the HW MTU table
 *      @mtu: the target MTU
 *      @idx: index of selected entry in the MTU table
 *
 *      Returns the index and the value in the HW MTU table that is closest to
 *      but does not exceed @mtu, unless @mtu is smaller than any value in the
 *      table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
                            unsigned int *idx)
{
        unsigned int i = 0;

        while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
                ++i;
        if (idx)
                *idx = i;
        return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
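/*
 * Worked example for cxgb4_best_mtu() (hypothetical table contents): if the
 * MTU table holds { 576, 1500, 9000, ... } and @mtu is 4000, the scan stops
 * at 1500 (the largest entry not exceeding 4000) and sets *idx to 1.  A
 * request below the smallest entry, say 100, returns mtus[0] == 576.
 */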
/**
 *      cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *      @mtus: the HW MTU table
 *      @header_size: Header Size
 *      @data_size_max: maximum Data Segment Size
 *      @data_size_align: desired Data Segment Size Alignment (2^N)
 *      @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *      Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *      MTU Table based solely on a Maximum MTU parameter, we break that
 *      parameter up into a Header Size and Maximum Data Segment Size, and
 *      provide a desired Data Segment Size Alignment.  If we find an MTU in
 *      the Hardware MTU Table which will result in a Data Segment Size with
 *      the requested alignment _and_ that MTU isn't "too far" from the
 *      closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
                                    unsigned short header_size,
                                    unsigned short data_size_max,
                                    unsigned short data_size_align,
                                    unsigned int *mtu_idxp)
{
        unsigned short max_mtu = header_size + data_size_max;
        unsigned short data_size_align_mask = data_size_align - 1;
        int mtu_idx, aligned_mtu_idx;

        /* Scan the MTU Table till we find an MTU which is larger than our
         * Maximum MTU or we reach the end of the table.  Along the way,
         * record the last MTU found, if any, which will result in a Data
         * Segment Length matching the requested alignment.
         */
        for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
                unsigned short data_size = mtus[mtu_idx] - header_size;

                /* If this MTU minus the Header Size would result in a
                 * Data Segment Size of the desired alignment, remember it.
                 */
                if ((data_size & data_size_align_mask) == 0)
                        aligned_mtu_idx = mtu_idx;

                /* If we're not at the end of the Hardware MTU Table and the
                 * next element is larger than our Maximum MTU, drop out of
                 * the loop.
                 */
                if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
                        break;
        }

        /* If we fell out of the loop because we ran to the end of the table,
         * then we just have to use the last [largest] entry.
         */
        if (mtu_idx == NMTUS)
                mtu_idx--;

        /* If we found an MTU which resulted in the requested Data Segment
         * Length alignment and that's "not far" from the largest MTU which is
         * less than or equal to the maximum MTU, then use that.
         */
        if (aligned_mtu_idx >= 0 &&
            mtu_idx - aligned_mtu_idx <= 1)
                mtu_idx = aligned_mtu_idx;

        /* If the caller has passed in an MTU Index pointer, pass the
         * MTU Index back.  Return the MTU value.
         */
        if (mtu_idxp)
                *mtu_idxp = mtu_idx;
        return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);

/**
 *      cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
 *      @chip: chip type
 *      @viid: VI id of the given port
 *
 *      Return the SMT index for this VI.
 */
unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
{
        /* In T4/T5, SMT contains 256 SMAC entries organized in
         * 128 rows of 2 entries each.
         * In T6, SMT contains 256 SMAC entries in 256 rows.
         * TODO: The below code needs to be updated when we add support
         * for 256 VFs.
         */
        if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
                return ((viid & 0x7f) << 1);
        else
                return (viid & 0x7f);
}
EXPORT_SYMBOL(cxgb4_tp_smt_idx);
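/*
 * Example of the mapping above (hypothetical VI): for a VI whose low seven
 * bits are 0x05, T4/T5 place its source MAC at SMT index 0x0a (two entries
 * per row, so the VI index is shifted left by one), while T6 uses index
 * 0x05 directly.
 */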
/**
 *      cxgb4_port_chan - get the HW channel of a port
 *      @dev: the net device for the port
 *
 *      Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
        return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
        struct adapter *adap = netdev2adap(dev);
        u32 v1, v2, lp_count, hp_count;

        v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
        v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
        if (is_t4(adap->params.chip)) {
                lp_count = LP_COUNT_G(v1);
                hp_count = HP_COUNT_G(v1);
        } else {
                lp_count = LP_COUNT_T5_G(v1);
                hp_count = HP_COUNT_T5_G(v2);
        }
        return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);

/**
 *      cxgb4_port_viid - get the VI id of a port
 *      @dev: the net device for the port
 *
 *      Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
        return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *      cxgb4_port_idx - get the index of a port
 *      @dev: the net device for the port
 *
 *      Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
        return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);

void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
                         struct tp_tcp_stats *v6)
{
        struct adapter *adap = pci_get_drvdata(pdev);

        spin_lock(&adap->stats_lock);
        t4_tp_get_tcp_stats(adap, v4, v6);
        spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
                      const unsigned int *pgsz_order)
{
        struct adapter *adap = netdev2adap(dev);

        t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
        t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
                     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
                     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);

int cxgb4_flush_eq_cache(struct net_device *dev)
{
        struct adapter *adap = netdev2adap(dev);

        return t4_sge_ctxt_flush(adap, adap->mbox);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);

static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
        u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
        __be64 indices;
        int ret;

        spin_lock(&adap->win0_lock);
        ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
                           sizeof(indices), (__be32 *)&indices,
                           T4_MEMORY_READ);
        spin_unlock(&adap->win0_lock);
        if (!ret) {
                *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
                *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
        }
        return ret;
}

int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
                        u16 size)
{
        struct adapter *adap = netdev2adap(dev);
        u16 hw_pidx, hw_cidx;
        int ret;

        ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
        if (ret)
                goto out;

        if (pidx != hw_pidx) {
                u16 delta;
                u32 val;

                if (pidx >= hw_pidx)
                        delta = pidx - hw_pidx;
                else
                        delta = size - hw_pidx + pidx;

                if (is_t4(adap->params.chip))
                        val = PIDX_V(delta);
                else
                        val = PIDX_T5_V(delta);
                wmb();
                t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
                             QID_V(qid) | val);
        }
out:
        return ret;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
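/*
 * Example of the doorbell delta computed above (hypothetical queue state):
 * for a 512-entry queue with hardware pidx 510 and a driver pidx of 4, the
 * producer index has wrapped, so the delta is 512 - 510 + 4 = 6 descriptors,
 * which is what gets written to the kernel doorbell register.
 */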
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
        struct adapter *adap;
        u32 offset, memtype, memaddr;
        u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
        u32 edc0_end, edc1_end, mc0_end, mc1_end;
        int ret;

        adap = netdev2adap(dev);

        offset = ((stag >> 8) * 32) + adap->vres.stag.start;

        /* Figure out where the offset lands in the Memory Type/Address scheme.
         * This code assumes that the memory is laid out starting at offset 0
         * with no breaks as: EDC0, EDC1, MC0, MC1.  All cards have both EDC0
         * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
         * MC0, and some have both MC0 and MC1.
         */
        size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
        edc0_size = EDRAM0_SIZE_G(size) << 20;
        size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
        edc1_size = EDRAM1_SIZE_G(size) << 20;
        size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
        mc0_size = EXT_MEM0_SIZE_G(size) << 20;

        edc0_end = edc0_size;
        edc1_end = edc0_end + edc1_size;
        mc0_end = edc1_end + mc0_size;

        if (offset < edc0_end) {
                memtype = MEM_EDC0;
                memaddr = offset;
        } else if (offset < edc1_end) {
                memtype = MEM_EDC1;
                memaddr = offset - edc0_end;
        } else {
                if (offset < mc0_end) {
                        memtype = MEM_MC0;
                        memaddr = offset - edc1_end;
                } else if (is_t5(adap->params.chip)) {
                        size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
                        mc1_size = EXT_MEM1_SIZE_G(size) << 20;
                        mc1_end = mc0_end + mc1_size;
                        if (offset < mc1_end) {
                                memtype = MEM_MC1;
                                memaddr = offset - mc0_end;
                        } else {
                                /* offset beyond the end of any memory */
                                goto err;
                        }
                } else {
                        /* T4/T6 only has a single memory channel */
                        goto err;
                }
        }

        spin_lock(&adap->win0_lock);
        ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
        spin_unlock(&adap->win0_lock);
        return ret;

err:
        dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
                stag, offset);
        return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
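/*
 * Worked example of the decode above (hypothetical sizes): with a 256 MB
 * EDC0, a 256 MB EDC1 and a 1 GB MC0, the ranges are EDC0 [0, 256 MB),
 * EDC1 [256 MB, 512 MB) and MC0 [512 MB, 1536 MB).  A stag offset of 300 MB
 * therefore resolves to MEM_EDC1 at address 300 MB - 256 MB = 44 MB within
 * that memory.
 */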
u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
        u32 hi, lo;
        struct adapter *adap;

        adap = netdev2adap(dev);
        lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
        hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

        return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);

int cxgb4_bar2_sge_qregs(struct net_device *dev,
                         unsigned int qid,
                         enum cxgb4_bar2_qtype qtype,
                         int user,
                         u64 *pbar2_qoffset,
                         unsigned int *pbar2_qid)
{
        return t4_bar2_sge_qregs(netdev2adap(dev),
                                 qid,
                                 (qtype == CXGB4_BAR2_QTYPE_EGRESS
                                  ? T4_BAR2_QTYPE_EGRESS
                                  : T4_BAR2_QTYPE_INGRESS),
                                 user,
                                 pbar2_qoffset,
                                 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);

static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
        const struct device *parent;
        const struct net_device *netdev = neigh->dev;

        if (is_vlan_dev(netdev))
                netdev = vlan_dev_real_dev(netdev);
        parent = netdev->dev.parent;
        if (parent && parent->driver == &cxgb4_driver.driver)
                t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
                       void *data)
{
        switch (event) {
        case NETEVENT_NEIGH_UPDATE:
                check_neigh_update(data);
                break;
        case NETEVENT_REDIRECT:
        default:
                break;
        }
        return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
        .notifier_call = netevent_cb
};

static void drain_db_fifo(struct adapter *adap, int usecs)
{
        u32 v1, v2, lp_count, hp_count;

        do {
                v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
                v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
                if (is_t4(adap->params.chip)) {
                        lp_count = LP_COUNT_G(v1);
                        hp_count = HP_COUNT_G(v1);
                } else {
                        lp_count = LP_COUNT_T5_G(v1);
                        hp_count = HP_COUNT_T5_G(v2);
                }

                if (lp_count == 0 && hp_count == 0)
                        break;
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(usecs_to_jiffies(usecs));
        } while (1);
}

static void disable_txq_db(struct sge_txq *q)
{
        unsigned long flags;

        spin_lock_irqsave(&q->db_lock, flags);
        q->db_disabled = 1;
        spin_unlock_irqrestore(&q->db_lock, flags);
}
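/*
 * While a queue's doorbell is disabled, the Tx path (in the SGE code,
 * outside this file) is expected to accumulate the pending producer-index
 * increment in q->db_pidx_inc rather than ringing the doorbell;
 * enable_txq_db() below then issues a single doorbell for the whole backlog
 * before re-enabling normal operation.
 */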
static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
        spin_lock_irq(&q->db_lock);
        if (q->db_pidx_inc) {
                /* Make sure that all writes to the TX descriptors
                 * are committed before we tell HW about them.
                 */
                wmb();
                t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
                             QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
                q->db_pidx_inc = 0;
        }
        q->db_disabled = 0;
        spin_unlock_irq(&q->db_lock);
}

static void disable_dbs(struct adapter *adap)
{
        int i;

        for_each_ethrxq(&adap->sge, i)
                disable_txq_db(&adap->sge.ethtxq[i].q);
        if (is_offload(adap)) {
                struct sge_uld_txq_info *txq_info =
                        adap->sge.uld_txq_info[CXGB4_TX_OFLD];

                if (txq_info) {
                        for_each_ofldtxq(&adap->sge, i) {
                                struct sge_uld_txq *txq = &txq_info->uldtxq[i];

                                disable_txq_db(&txq->q);
                        }
                }
        }
        for_each_port(adap, i)
                disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
        int i;

        for_each_ethrxq(&adap->sge, i)
                enable_txq_db(adap, &adap->sge.ethtxq[i].q);
        if (is_offload(adap)) {
                struct sge_uld_txq_info *txq_info =
                        adap->sge.uld_txq_info[CXGB4_TX_OFLD];

                if (txq_info) {
                        for_each_ofldtxq(&adap->sge, i) {
                                struct sge_uld_txq *txq = &txq_info->uldtxq[i];

                                enable_txq_db(adap, &txq->q);
                        }
                }
        }
        for_each_port(adap, i)
                enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}

static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
        enum cxgb4_uld type = CXGB4_ULD_RDMA;

        if (adap->uld && adap->uld[type].handle)
                adap->uld[type].control(adap->uld[type].handle, cmd);
}

static void process_db_full(struct work_struct *work)
{
        struct adapter *adap;

        adap = container_of(work, struct adapter, db_full_task);

        drain_db_fifo(adap, dbfifo_drain_delay);
        enable_dbs(adap);
        notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
        if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
                t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
                                 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
                                 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
        else
                t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
                                 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}

static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
        u16 hw_pidx, hw_cidx;
        int ret;

        spin_lock_irq(&q->db_lock);
        ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
        if (ret)
                goto out;
        if (q->db_pidx != hw_pidx) {
                u16 delta;
                u32 val;

                if (q->db_pidx >= hw_pidx)
                        delta = q->db_pidx - hw_pidx;
                else
                        delta = q->size - hw_pidx + q->db_pidx;

                if (is_t4(adap->params.chip))
                        val = PIDX_V(delta);
                else
                        val = PIDX_T5_V(delta);
                wmb();
                t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
                             QID_V(q->cntxt_id) | val);
        }
out:
        q->db_disabled = 0;
        q->db_pidx_inc = 0;
        spin_unlock_irq(&q->db_lock);
        if (ret)
                CH_WARN(adap, "DB drop recovery failed.\n");
}

static void recover_all_queues(struct adapter *adap)
{
        int i;

        for_each_ethrxq(&adap->sge, i)
                sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
        if (is_offload(adap)) {
                struct sge_uld_txq_info *txq_info =
                        adap->sge.uld_txq_info[CXGB4_TX_OFLD];
                if (txq_info) {
                        for_each_ofldtxq(&adap->sge, i) {
                                struct sge_uld_txq *txq = &txq_info->uldtxq[i];

                                sync_txq_pidx(adap, &txq->q);
                        }
                }
        }
        for_each_port(adap, i)
                sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
work_struct *work) 1986 { 1987 struct adapter *adap; 1988 1989 adap = container_of(work, struct adapter, db_drop_task); 1990 1991 if (is_t4(adap->params.chip)) { 1992 drain_db_fifo(adap, dbfifo_drain_delay); 1993 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); 1994 drain_db_fifo(adap, dbfifo_drain_delay); 1995 recover_all_queues(adap); 1996 drain_db_fifo(adap, dbfifo_drain_delay); 1997 enable_dbs(adap); 1998 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); 1999 } else if (is_t5(adap->params.chip)) { 2000 u32 dropped_db = t4_read_reg(adap, 0x010ac); 2001 u16 qid = (dropped_db >> 15) & 0x1ffff; 2002 u16 pidx_inc = dropped_db & 0x1fff; 2003 u64 bar2_qoffset; 2004 unsigned int bar2_qid; 2005 int ret; 2006 2007 ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS, 2008 0, &bar2_qoffset, &bar2_qid); 2009 if (ret) 2010 dev_err(adap->pdev_dev, "doorbell drop recovery: " 2011 "qid=%d, pidx_inc=%d\n", qid, pidx_inc); 2012 else 2013 writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid), 2014 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL); 2015 2016 /* Re-enable BAR2 WC */ 2017 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15); 2018 } 2019 2020 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) 2021 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0); 2022 } 2023 2024 void t4_db_full(struct adapter *adap) 2025 { 2026 if (is_t4(adap->params.chip)) { 2027 disable_dbs(adap); 2028 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); 2029 t4_set_reg_field(adap, SGE_INT_ENABLE3_A, 2030 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0); 2031 queue_work(adap->workq, &adap->db_full_task); 2032 } 2033 } 2034 2035 void t4_db_dropped(struct adapter *adap) 2036 { 2037 if (is_t4(adap->params.chip)) { 2038 disable_dbs(adap); 2039 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); 2040 } 2041 queue_work(adap->workq, &adap->db_drop_task); 2042 } 2043 2044 void t4_register_netevent_notifier(void) 2045 { 2046 if (!netevent_registered) { 2047 register_netevent_notifier(&cxgb4_netevent_nb); 2048 netevent_registered = true; 2049 } 2050 } 2051 2052 static void detach_ulds(struct adapter *adap) 2053 { 2054 unsigned int i; 2055 2056 mutex_lock(&uld_mutex); 2057 list_del(&adap->list_node); 2058 for (i = 0; i < CXGB4_ULD_MAX; i++) 2059 if (adap->uld && adap->uld[i].handle) { 2060 adap->uld[i].state_change(adap->uld[i].handle, 2061 CXGB4_STATE_DETACH); 2062 adap->uld[i].handle = NULL; 2063 } 2064 if (netevent_registered && list_empty(&adapter_list)) { 2065 unregister_netevent_notifier(&cxgb4_netevent_nb); 2066 netevent_registered = false; 2067 } 2068 mutex_unlock(&uld_mutex); 2069 } 2070 2071 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) 2072 { 2073 unsigned int i; 2074 2075 mutex_lock(&uld_mutex); 2076 for (i = 0; i < CXGB4_ULD_MAX; i++) 2077 if (adap->uld && adap->uld[i].handle) 2078 adap->uld[i].state_change(adap->uld[i].handle, 2079 new_state); 2080 mutex_unlock(&uld_mutex); 2081 } 2082 2083 #if IS_ENABLED(CONFIG_IPV6) 2084 static int cxgb4_inet6addr_handler(struct notifier_block *this, 2085 unsigned long event, void *data) 2086 { 2087 struct inet6_ifaddr *ifa = data; 2088 struct net_device *event_dev = ifa->idev->dev; 2089 const struct device *parent = NULL; 2090 #if IS_ENABLED(CONFIG_BONDING) 2091 struct adapter *adap; 2092 #endif 2093 if (is_vlan_dev(event_dev)) 2094 event_dev = vlan_dev_real_dev(event_dev); 2095 #if IS_ENABLED(CONFIG_BONDING) 2096 if (event_dev->flags & IFF_MASTER) { 2097 list_for_each_entry(adap, &adapter_list, list_node) { 2098 switch (event) { 2099 case NETDEV_UP: 2100 
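/* CLIP (Compressed Local IP) entries are kept per adapter rather than
 * per port, so port[0] stands in for the whole device when the address
 * arrives via a bonding master.
 */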
cxgb4_clip_get(adap->port[0], 2101 (const u32 *)ifa, 1); 2102 break; 2103 case NETDEV_DOWN: 2104 cxgb4_clip_release(adap->port[0], 2105 (const u32 *)ifa, 1); 2106 break; 2107 default: 2108 break; 2109 } 2110 } 2111 return NOTIFY_OK; 2112 } 2113 #endif 2114 2115 if (event_dev) 2116 parent = event_dev->dev.parent; 2117 2118 if (parent && parent->driver == &cxgb4_driver.driver) { 2119 switch (event) { 2120 case NETDEV_UP: 2121 cxgb4_clip_get(event_dev, (const u32 *)ifa, 1); 2122 break; 2123 case NETDEV_DOWN: 2124 cxgb4_clip_release(event_dev, (const u32 *)ifa, 1); 2125 break; 2126 default: 2127 break; 2128 } 2129 } 2130 return NOTIFY_OK; 2131 } 2132 2133 static bool inet6addr_registered; 2134 static struct notifier_block cxgb4_inet6addr_notifier = { 2135 .notifier_call = cxgb4_inet6addr_handler 2136 }; 2137 2138 static void update_clip(const struct adapter *adap) 2139 { 2140 int i; 2141 struct net_device *dev; 2142 int ret; 2143 2144 rcu_read_lock(); 2145 2146 for (i = 0; i < MAX_NPORTS; i++) { 2147 dev = adap->port[i]; 2148 ret = 0; 2149 2150 if (dev) 2151 ret = cxgb4_update_root_dev_clip(dev); 2152 2153 if (ret < 0) 2154 break; 2155 } 2156 rcu_read_unlock(); 2157 } 2158 #endif /* IS_ENABLED(CONFIG_IPV6) */ 2159 2160 /** 2161 * cxgb_up - enable the adapter 2162 * @adap: adapter being enabled 2163 * 2164 * Called when the first port is enabled, this function performs the 2165 * actions necessary to make an adapter operational, such as completing 2166 * the initialization of HW modules, and enabling interrupts. 2167 * 2168 * Must be called with the rtnl lock held. 2169 */ 2170 static int cxgb_up(struct adapter *adap) 2171 { 2172 int err; 2173 2174 err = setup_sge_queues(adap); 2175 if (err) 2176 goto out; 2177 err = setup_rss(adap); 2178 if (err) 2179 goto freeq; 2180 2181 if (adap->flags & USING_MSIX) { 2182 name_msix_vecs(adap); 2183 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0, 2184 adap->msix_info[0].desc, adap); 2185 if (err) 2186 goto irq_err; 2187 err = request_msix_queue_irqs(adap); 2188 if (err) { 2189 free_irq(adap->msix_info[0].vec, adap); 2190 goto irq_err; 2191 } 2192 } else { 2193 err = request_irq(adap->pdev->irq, t4_intr_handler(adap), 2194 (adap->flags & USING_MSI) ? 
0 : IRQF_SHARED, 2195 adap->port[0]->name, adap); 2196 if (err) 2197 goto irq_err; 2198 } 2199 enable_rx(adap); 2200 t4_sge_start(adap); 2201 t4_intr_enable(adap); 2202 adap->flags |= FULL_INIT_DONE; 2203 notify_ulds(adap, CXGB4_STATE_UP); 2204 #if IS_ENABLED(CONFIG_IPV6) 2205 update_clip(adap); 2206 #endif 2207 /* Initialize hash mac addr list*/ 2208 INIT_LIST_HEAD(&adap->mac_hlist); 2209 out: 2210 return err; 2211 irq_err: 2212 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); 2213 freeq: 2214 t4_free_sge_resources(adap); 2215 goto out; 2216 } 2217 2218 static void cxgb_down(struct adapter *adapter) 2219 { 2220 cancel_work_sync(&adapter->tid_release_task); 2221 cancel_work_sync(&adapter->db_full_task); 2222 cancel_work_sync(&adapter->db_drop_task); 2223 adapter->tid_release_task_busy = false; 2224 adapter->tid_release_head = NULL; 2225 2226 t4_sge_stop(adapter); 2227 t4_free_sge_resources(adapter); 2228 adapter->flags &= ~FULL_INIT_DONE; 2229 } 2230 2231 /* 2232 * net_device operations 2233 */ 2234 static int cxgb_open(struct net_device *dev) 2235 { 2236 int err; 2237 struct port_info *pi = netdev_priv(dev); 2238 struct adapter *adapter = pi->adapter; 2239 2240 netif_carrier_off(dev); 2241 2242 if (!(adapter->flags & FULL_INIT_DONE)) { 2243 err = cxgb_up(adapter); 2244 if (err < 0) 2245 return err; 2246 } 2247 2248 err = link_start(dev); 2249 if (!err) 2250 netif_tx_start_all_queues(dev); 2251 return err; 2252 } 2253 2254 static int cxgb_close(struct net_device *dev) 2255 { 2256 struct port_info *pi = netdev_priv(dev); 2257 struct adapter *adapter = pi->adapter; 2258 2259 netif_tx_stop_all_queues(dev); 2260 netif_carrier_off(dev); 2261 return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false); 2262 } 2263 2264 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, 2265 __be32 sip, __be16 sport, __be16 vlan, 2266 unsigned int queue, unsigned char port, unsigned char mask) 2267 { 2268 int ret; 2269 struct filter_entry *f; 2270 struct adapter *adap; 2271 int i; 2272 u8 *val; 2273 2274 adap = netdev2adap(dev); 2275 2276 /* Adjust stid to correct filter index */ 2277 stid -= adap->tids.sftid_base; 2278 stid += adap->tids.nftids; 2279 2280 /* Check to make sure the filter requested is writable ... 2281 */ 2282 f = &adap->tids.ftid_tab[stid]; 2283 ret = writable_filter(f); 2284 if (ret) 2285 return ret; 2286 2287 /* Clear out any old resources being used by the filter before 2288 * we start constructing the new filter. 2289 */ 2290 if (f->valid) 2291 clear_filter(adap, f); 2292 2293 /* Clear out filter specifications */ 2294 memset(&f->fs, 0, sizeof(struct ch_filter_specification)); 2295 f->fs.val.lport = cpu_to_be16(sport); 2296 f->fs.mask.lport = ~0; 2297 val = (u8 *)&sip; 2298 if ((val[0] | val[1] | val[2] | val[3]) != 0) { 2299 for (i = 0; i < 4; i++) { 2300 f->fs.val.lip[i] = val[i]; 2301 f->fs.mask.lip[i] = ~0; 2302 } 2303 if (adap->params.tp.vlan_pri_map & PORT_F) { 2304 f->fs.val.iport = port; 2305 f->fs.mask.iport = mask; 2306 } 2307 } 2308 2309 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) { 2310 f->fs.val.proto = IPPROTO_TCP; 2311 f->fs.mask.proto = ~0; 2312 } 2313 2314 f->fs.dirsteer = 1; 2315 f->fs.iq = queue; 2316 /* Mark filter as locked */ 2317 f->locked = 1; 2318 f->fs.rpttid = 1; 2319 2320 /* Save the actual tid. We need this to get the corresponding 2321 * filter entry structure in filter_rpl. 
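 * (the reply handler maps the hardware TID in the firmware reply back
 * to this filter_entry.)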
2322 */ 2323 f->tid = stid + adap->tids.ftid_base; 2324 ret = set_filter_wr(adap, stid); 2325 if (ret) { 2326 clear_filter(adap, f); 2327 return ret; 2328 } 2329 2330 return 0; 2331 } 2332 EXPORT_SYMBOL(cxgb4_create_server_filter); 2333 2334 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, 2335 unsigned int queue, bool ipv6) 2336 { 2337 struct filter_entry *f; 2338 struct adapter *adap; 2339 2340 adap = netdev2adap(dev); 2341 2342 /* Adjust stid to correct filter index */ 2343 stid -= adap->tids.sftid_base; 2344 stid += adap->tids.nftids; 2345 2346 f = &adap->tids.ftid_tab[stid]; 2347 /* Unlock the filter */ 2348 f->locked = 0; 2349 2350 return delete_filter(adap, stid); 2351 } 2352 EXPORT_SYMBOL(cxgb4_remove_server_filter); 2353 2354 static void cxgb_get_stats(struct net_device *dev, 2355 struct rtnl_link_stats64 *ns) 2356 { 2357 struct port_stats stats; 2358 struct port_info *p = netdev_priv(dev); 2359 struct adapter *adapter = p->adapter; 2360 2361 /* Block retrieving statistics during EEH error 2362 * recovery. Otherwise, the recovery might fail 2363 * and the PCI device will be removed permanently 2364 */ 2365 spin_lock(&adapter->stats_lock); 2366 if (!netif_device_present(dev)) { 2367 spin_unlock(&adapter->stats_lock); 2368 return; 2369 } 2370 t4_get_port_stats_offset(adapter, p->tx_chan, &stats, 2371 &p->stats_base); 2372 spin_unlock(&adapter->stats_lock); 2373 2374 ns->tx_bytes = stats.tx_octets; 2375 ns->tx_packets = stats.tx_frames; 2376 ns->rx_bytes = stats.rx_octets; 2377 ns->rx_packets = stats.rx_frames; 2378 ns->multicast = stats.rx_mcast_frames; 2379 2380 /* detailed rx_errors */ 2381 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long + 2382 stats.rx_runt; 2383 ns->rx_over_errors = 0; 2384 ns->rx_crc_errors = stats.rx_fcs_err; 2385 ns->rx_frame_errors = stats.rx_symbol_err; 2386 ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 + 2387 stats.rx_ovflow2 + stats.rx_ovflow3 + 2388 stats.rx_trunc0 + stats.rx_trunc1 + 2389 stats.rx_trunc2 + stats.rx_trunc3; 2390 ns->rx_missed_errors = 0; 2391 2392 /* detailed tx_errors */ 2393 ns->tx_aborted_errors = 0; 2394 ns->tx_carrier_errors = 0; 2395 ns->tx_fifo_errors = 0; 2396 ns->tx_heartbeat_errors = 0; 2397 ns->tx_window_errors = 0; 2398 2399 ns->tx_errors = stats.tx_error_frames; 2400 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + 2401 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; 2402 } 2403 2404 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 2405 { 2406 unsigned int mbox; 2407 int ret = 0, prtad, devad; 2408 struct port_info *pi = netdev_priv(dev); 2409 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; 2410 2411 switch (cmd) { 2412 case SIOCGMIIPHY: 2413 if (pi->mdio_addr < 0) 2414 return -EOPNOTSUPP; 2415 data->phy_id = pi->mdio_addr; 2416 break; 2417 case SIOCGMIIREG: 2418 case SIOCSMIIREG: 2419 if (mdio_phy_id_is_c45(data->phy_id)) { 2420 prtad = mdio_phy_id_prtad(data->phy_id); 2421 devad = mdio_phy_id_devad(data->phy_id); 2422 } else if (data->phy_id < 32) { 2423 prtad = data->phy_id; 2424 devad = 0; 2425 data->reg_num &= 0x1f; 2426 } else 2427 return -EINVAL; 2428 2429 mbox = pi->adapter->pf; 2430 if (cmd == SIOCGMIIREG) 2431 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad, 2432 data->reg_num, &data->val_out); 2433 else 2434 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad, 2435 data->reg_num, data->val_in); 2436 break; 2437 case SIOCGHWTSTAMP: 2438 return copy_to_user(req->ifr_data, &pi->tstamp_config, 2439 
sizeof(pi->tstamp_config)) ? 2440 -EFAULT : 0; 2441 case SIOCSHWTSTAMP: 2442 if (copy_from_user(&pi->tstamp_config, req->ifr_data, 2443 sizeof(pi->tstamp_config))) 2444 return -EFAULT; 2445 2446 switch (pi->tstamp_config.rx_filter) { 2447 case HWTSTAMP_FILTER_NONE: 2448 pi->rxtstamp = false; 2449 break; 2450 case HWTSTAMP_FILTER_ALL: 2451 pi->rxtstamp = true; 2452 break; 2453 default: 2454 pi->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; 2455 return -ERANGE; 2456 } 2457 2458 return copy_to_user(req->ifr_data, &pi->tstamp_config, 2459 sizeof(pi->tstamp_config)) ? 2460 -EFAULT : 0; 2461 default: 2462 return -EOPNOTSUPP; 2463 } 2464 return ret; 2465 } 2466 2467 static void cxgb_set_rxmode(struct net_device *dev) 2468 { 2469 /* unfortunately we can't return errors to the stack */ 2470 set_rxmode(dev, -1, false); 2471 } 2472 2473 static int cxgb_change_mtu(struct net_device *dev, int new_mtu) 2474 { 2475 int ret; 2476 struct port_info *pi = netdev_priv(dev); 2477 2478 ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1, 2479 -1, -1, -1, true); 2480 if (!ret) 2481 dev->mtu = new_mtu; 2482 return ret; 2483 } 2484 2485 #ifdef CONFIG_PCI_IOV 2486 static int dummy_open(struct net_device *dev) 2487 { 2488 /* Turn carrier off since we don't have to transmit anything on this 2489 * interface. 2490 */ 2491 netif_carrier_off(dev); 2492 return 0; 2493 } 2494 2495 /* Fill MAC address that will be assigned by the FW */ 2496 static void fill_vf_station_mac_addr(struct adapter *adap) 2497 { 2498 unsigned int i; 2499 u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN]; 2500 int err; 2501 u8 *na; 2502 u16 a, b; 2503 2504 err = t4_get_raw_vpd_params(adap, &adap->params.vpd); 2505 if (!err) { 2506 na = adap->params.vpd.na; 2507 for (i = 0; i < ETH_ALEN; i++) 2508 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 + 2509 hex2val(na[2 * i + 1])); 2510 a = (hw_addr[0] << 8) | hw_addr[1]; 2511 b = (hw_addr[1] << 8) | hw_addr[2]; 2512 a ^= b; 2513 a |= 0x0200; /* locally assigned Ethernet MAC address */ 2514 a &= ~0x0100; /* not a multicast Ethernet MAC address */ 2515 macaddr[0] = a >> 8; 2516 macaddr[1] = a & 0xff; 2517 2518 for (i = 2; i < 5; i++) 2519 macaddr[i] = hw_addr[i + 1]; 2520 2521 for (i = 0; i < adap->num_vfs; i++) { 2522 macaddr[5] = adap->pf * 16 + i; 2523 ether_addr_copy(adap->vfinfo[i].vf_mac_addr, macaddr); 2524 } 2525 } 2526 } 2527 2528 static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac) 2529 { 2530 struct port_info *pi = netdev_priv(dev); 2531 struct adapter *adap = pi->adapter; 2532 int ret; 2533 2534 /* verify MAC addr is valid */ 2535 if (!is_valid_ether_addr(mac)) { 2536 dev_err(pi->adapter->pdev_dev, 2537 "Invalid Ethernet address %pM for VF %d\n", 2538 mac, vf); 2539 return -EINVAL; 2540 } 2541 2542 dev_info(pi->adapter->pdev_dev, 2543 "Setting MAC %pM on VF %d\n", mac, vf); 2544 ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac); 2545 if (!ret) 2546 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac); 2547 return ret; 2548 } 2549 2550 static int cxgb_get_vf_config(struct net_device *dev, 2551 int vf, struct ifla_vf_info *ivi) 2552 { 2553 struct port_info *pi = netdev_priv(dev); 2554 struct adapter *adap = pi->adapter; 2555 2556 if (vf >= adap->num_vfs) 2557 return -EINVAL; 2558 ivi->vf = vf; 2559 ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr); 2560 return 0; 2561 } 2562 2563 static int cxgb_get_phys_port_id(struct net_device *dev, 2564 struct netdev_phys_item_id *ppid) 2565 { 2566 struct port_info *pi = netdev_priv(dev); 2567 unsigned int phy_port_id; 2568 2569 
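/* Synthesize an ID from the adapter index and port number; e.g. adapter 2,
 * port 1 yields 21, so interfaces that share a physical port report the
 * same phys_port_id.
 */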
phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id; 2570 ppid->id_len = sizeof(phy_port_id); 2571 memcpy(ppid->id, &phy_port_id, ppid->id_len); 2572 return 0; 2573 } 2574 2575 #endif 2576 2577 static int cxgb_set_mac_addr(struct net_device *dev, void *p) 2578 { 2579 int ret; 2580 struct sockaddr *addr = p; 2581 struct port_info *pi = netdev_priv(dev); 2582 2583 if (!is_valid_ether_addr(addr->sa_data)) 2584 return -EADDRNOTAVAIL; 2585 2586 ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid, 2587 pi->xact_addr_filt, addr->sa_data, true, true); 2588 if (ret < 0) 2589 return ret; 2590 2591 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 2592 pi->xact_addr_filt = ret; 2593 return 0; 2594 } 2595 2596 #ifdef CONFIG_NET_POLL_CONTROLLER 2597 static void cxgb_netpoll(struct net_device *dev) 2598 { 2599 struct port_info *pi = netdev_priv(dev); 2600 struct adapter *adap = pi->adapter; 2601 2602 if (adap->flags & USING_MSIX) { 2603 int i; 2604 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; 2605 2606 for (i = pi->nqsets; i; i--, rx++) 2607 t4_sge_intr_msix(0, &rx->rspq); 2608 } else 2609 t4_intr_handler(adap)(0, adap); 2610 } 2611 #endif 2612 2613 static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) 2614 { 2615 struct port_info *pi = netdev_priv(dev); 2616 struct adapter *adap = pi->adapter; 2617 struct sched_class *e; 2618 struct ch_sched_params p; 2619 struct ch_sched_queue qe; 2620 u32 req_rate; 2621 int err = 0; 2622 2623 if (!can_sched(dev)) 2624 return -ENOTSUPP; 2625 2626 if (index < 0 || index > pi->nqsets - 1) 2627 return -EINVAL; 2628 2629 if (!(adap->flags & FULL_INIT_DONE)) { 2630 dev_err(adap->pdev_dev, 2631 "Failed to rate limit on queue %d. Link Down?\n", 2632 index); 2633 return -EINVAL; 2634 } 2635 2636 /* Convert from Mbps to Kbps */ 2637 req_rate = rate * 1000; 2638 2639 /* Max rate is 10 Gbps */ 2640 if (req_rate >= SCHED_MAX_RATE_KBPS) { 2641 dev_err(adap->pdev_dev, 2642 "Invalid rate %u Mbps, Max rate is %u Kbps\n", 2643 rate, SCHED_MAX_RATE_KBPS); 2644 return -ERANGE; 2645 } 2646 2647 /* First unbind the queue from any existing class */ 2648 memset(&qe, 0, sizeof(qe)); 2649 qe.queue = index; 2650 qe.class = SCHED_CLS_NONE; 2651 2652 err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE); 2653 if (err) { 2654 dev_err(adap->pdev_dev, 2655 "Unbinding Queue %d on port %d failed. Err: %d\n", 2656 index, pi->port_id, err); 2657 return err; 2658 } 2659 2660 /* Queue already unbound */ 2661 if (!req_rate) 2662 return 0; 2663 2664 /* Fetch any available unused or matching scheduling class */ 2665 memset(&p, 0, sizeof(p)); 2666 p.type = SCHED_CLASS_TYPE_PACKET; 2667 p.u.params.level = SCHED_CLASS_LEVEL_CL_RL; 2668 p.u.params.mode = SCHED_CLASS_MODE_CLASS; 2669 p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS; 2670 p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS; 2671 p.u.params.channel = pi->tx_chan; 2672 p.u.params.class = SCHED_CLS_NONE; 2673 p.u.params.minrate = 0; 2674 p.u.params.maxrate = req_rate; 2675 p.u.params.weight = 0; 2676 p.u.params.pktsize = dev->mtu; 2677 2678 e = cxgb4_sched_class_alloc(dev, &p); 2679 if (!e) 2680 return -ENOMEM; 2681 2682 /* Bind the queue to a scheduling class */ 2683 memset(&qe, 0, sizeof(qe)); 2684 qe.queue = index; 2685 qe.class = e->idx; 2686 2687 err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE); 2688 if (err) 2689 dev_err(adap->pdev_dev, 2690 "Queue rate limiting failed. 
Err: %d\n", err); 2691 return err; 2692 } 2693 2694 static int cxgb_setup_tc(struct net_device *dev, u32 handle, __be16 proto, 2695 struct tc_to_netdev *tc) 2696 { 2697 struct port_info *pi = netdev2pinfo(dev); 2698 struct adapter *adap = netdev2adap(dev); 2699 2700 if (!(adap->flags & FULL_INIT_DONE)) { 2701 dev_err(adap->pdev_dev, 2702 "Failed to setup tc on port %d. Link Down?\n", 2703 pi->port_id); 2704 return -EINVAL; 2705 } 2706 2707 if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) && 2708 tc->type == TC_SETUP_CLSU32) { 2709 switch (tc->cls_u32->command) { 2710 case TC_CLSU32_NEW_KNODE: 2711 case TC_CLSU32_REPLACE_KNODE: 2712 return cxgb4_config_knode(dev, proto, tc->cls_u32); 2713 case TC_CLSU32_DELETE_KNODE: 2714 return cxgb4_delete_knode(dev, proto, tc->cls_u32); 2715 default: 2716 return -EOPNOTSUPP; 2717 } 2718 } 2719 2720 return -EOPNOTSUPP; 2721 } 2722 2723 static const struct net_device_ops cxgb4_netdev_ops = { 2724 .ndo_open = cxgb_open, 2725 .ndo_stop = cxgb_close, 2726 .ndo_start_xmit = t4_eth_xmit, 2727 .ndo_select_queue = cxgb_select_queue, 2728 .ndo_get_stats64 = cxgb_get_stats, 2729 .ndo_set_rx_mode = cxgb_set_rxmode, 2730 .ndo_set_mac_address = cxgb_set_mac_addr, 2731 .ndo_set_features = cxgb_set_features, 2732 .ndo_validate_addr = eth_validate_addr, 2733 .ndo_do_ioctl = cxgb_ioctl, 2734 .ndo_change_mtu = cxgb_change_mtu, 2735 #ifdef CONFIG_NET_POLL_CONTROLLER 2736 .ndo_poll_controller = cxgb_netpoll, 2737 #endif 2738 #ifdef CONFIG_CHELSIO_T4_FCOE 2739 .ndo_fcoe_enable = cxgb_fcoe_enable, 2740 .ndo_fcoe_disable = cxgb_fcoe_disable, 2741 #endif /* CONFIG_CHELSIO_T4_FCOE */ 2742 .ndo_set_tx_maxrate = cxgb_set_tx_maxrate, 2743 .ndo_setup_tc = cxgb_setup_tc, 2744 }; 2745 2746 #ifdef CONFIG_PCI_IOV 2747 static const struct net_device_ops cxgb4_mgmt_netdev_ops = { 2748 .ndo_open = dummy_open, 2749 .ndo_set_vf_mac = cxgb_set_vf_mac, 2750 .ndo_get_vf_config = cxgb_get_vf_config, 2751 .ndo_get_phys_port_id = cxgb_get_phys_port_id, 2752 }; 2753 #endif 2754 2755 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2756 { 2757 struct adapter *adapter = netdev2adap(dev); 2758 2759 strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver)); 2760 strlcpy(info->version, cxgb4_driver_version, 2761 sizeof(info->version)); 2762 strlcpy(info->bus_info, pci_name(adapter->pdev), 2763 sizeof(info->bus_info)); 2764 } 2765 2766 static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = { 2767 .get_drvinfo = get_drvinfo, 2768 }; 2769 2770 void t4_fatal_err(struct adapter *adap) 2771 { 2772 int port; 2773 2774 /* Disable the SGE since ULDs are going to free resources that 2775 * could be exposed to the adapter. RDMA MWs for example... 2776 */ 2777 t4_shutdown_adapter(adap); 2778 for_each_port(adap, port) { 2779 struct net_device *dev = adap->port[port]; 2780 2781 /* If we get here in very early initialization the network 2782 * devices may not have been set up yet. 
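 * (their adap->port[] slots are still NULL), so skip them instead of
 * dereferencing a NULL pointer.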
2783 */ 2784 if (!dev) 2785 continue; 2786 2787 netif_tx_stop_all_queues(dev); 2788 netif_carrier_off(dev); 2789 } 2790 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); 2791 } 2792 2793 static void setup_memwin(struct adapter *adap) 2794 { 2795 u32 nic_win_base = t4_get_util_window(adap); 2796 2797 t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC); 2798 } 2799 2800 static void setup_memwin_rdma(struct adapter *adap) 2801 { 2802 if (adap->vres.ocq.size) { 2803 u32 start; 2804 unsigned int sz_kb; 2805 2806 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2); 2807 start &= PCI_BASE_ADDRESS_MEM_MASK; 2808 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres); 2809 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; 2810 t4_write_reg(adap, 2811 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3), 2812 start | BIR_V(1) | WINDOW_V(ilog2(sz_kb))); 2813 t4_write_reg(adap, 2814 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3), 2815 adap->vres.ocq.start); 2816 t4_read_reg(adap, 2817 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3)); 2818 } 2819 } 2820 2821 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) 2822 { 2823 u32 v; 2824 int ret; 2825 2826 /* get device capabilities */ 2827 memset(c, 0, sizeof(*c)); 2828 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | 2829 FW_CMD_REQUEST_F | FW_CMD_READ_F); 2830 c->cfvalid_to_len16 = htonl(FW_LEN16(*c)); 2831 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c); 2832 if (ret < 0) 2833 return ret; 2834 2835 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | 2836 FW_CMD_REQUEST_F | FW_CMD_WRITE_F); 2837 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL); 2838 if (ret < 0) 2839 return ret; 2840 2841 ret = t4_config_glbl_rss(adap, adap->pf, 2842 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, 2843 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F | 2844 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F); 2845 if (ret < 0) 2846 return ret; 2847 2848 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64, 2849 MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, 2850 FW_CMD_CAP_PF); 2851 if (ret < 0) 2852 return ret; 2853 2854 t4_sge_init(adap); 2855 2856 /* tweak some settings */ 2857 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849); 2858 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12)); 2859 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A); 2860 v = t4_read_reg(adap, TP_PIO_DATA_A); 2861 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F); 2862 2863 /* first 4 Tx modulation queues point to consecutive Tx channels */ 2864 adap->params.tp.tx_modq_map = 0xE4; 2865 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A, 2866 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map)); 2867 2868 /* associate each Tx modulation queue with consecutive Tx channels */ 2869 v = 0x84218421; 2870 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, 2871 &v, 1, TP_TX_SCHED_HDR_A); 2872 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, 2873 &v, 1, TP_TX_SCHED_FIFO_A); 2874 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, 2875 &v, 1, TP_TX_SCHED_PCMD_A); 2876 2877 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */ 2878 if (is_offload(adap)) { 2879 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A, 2880 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 2881 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 2882 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 2883 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT)); 2884 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A, 2885 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 
2886 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 2887 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 2888 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT)); 2889 } 2890 2891 /* get basic stuff going */ 2892 return t4_early_init(adap, adap->pf); 2893 } 2894 2895 /* 2896 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower. 2897 */ 2898 #define MAX_ATIDS 8192U 2899 2900 /* 2901 * Phase 0 of initialization: contact FW, obtain config, perform basic init. 2902 * 2903 * If the firmware we're dealing with has Configuration File support, then 2904 * we use that to perform all configuration 2905 */ 2906 2907 /* 2908 * Tweak configuration based on module parameters, etc. Most of these have 2909 * defaults assigned to them by Firmware Configuration Files (if we're using 2910 * them) but need to be explicitly set if we're using hard-coded 2911 * initialization. But even in the case of using Firmware Configuration 2912 * Files, we'd like to expose the ability to change these via module 2913 * parameters so these are essentially common tweaks/settings for 2914 * Configuration Files and hard-coded initialization ... 2915 */ 2916 static int adap_init0_tweaks(struct adapter *adapter) 2917 { 2918 /* 2919 * Fix up various Host-Dependent Parameters like Page Size, Cache 2920 * Line Size, etc. The firmware default is for a 4KB Page Size and 2921 * 64B Cache Line Size ... 2922 */ 2923 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES); 2924 2925 /* 2926 * Process module parameters which affect early initialization. 2927 */ 2928 if (rx_dma_offset != 2 && rx_dma_offset != 0) { 2929 dev_err(&adapter->pdev->dev, 2930 "Ignoring illegal rx_dma_offset=%d, using 2\n", 2931 rx_dma_offset); 2932 rx_dma_offset = 2; 2933 } 2934 t4_set_reg_field(adapter, SGE_CONTROL_A, 2935 PKTSHIFT_V(PKTSHIFT_M), 2936 PKTSHIFT_V(rx_dma_offset)); 2937 2938 /* 2939 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux 2940 * adds the pseudo header itself. 2941 */ 2942 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A, 2943 CSUM_HAS_PSEUDO_HDR_F, 0); 2944 2945 return 0; 2946 } 2947 2948 /* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips 2949 * unto themselves and they contain their own firmware to perform their 2950 * tasks ... 2951 */ 2952 static int phy_aq1202_version(const u8 *phy_fw_data, 2953 size_t phy_fw_size) 2954 { 2955 int offset; 2956 2957 /* At offset 0x8 you're looking for the primary image's 2958 * starting offset which is 3 Bytes wide 2959 * 2960 * At offset 0xa of the primary image, you look for the offset 2961 * of the DRAM segment which is 3 Bytes wide. 
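 * (both of these offsets are little-endian, while the version word
 * itself is big-endian; hence the le24()/be16() helpers below)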
2962 * 2963 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes 2964 * wide 2965 */ 2966 #define be16(__p) (((__p)[0] << 8) | (__p)[1]) 2967 #define le16(__p) ((__p)[0] | ((__p)[1] << 8)) 2968 #define le24(__p) (le16(__p) | ((__p)[2] << 16)) 2969 2970 offset = le24(phy_fw_data + 0x8) << 12; 2971 offset = le24(phy_fw_data + offset + 0xa); 2972 return be16(phy_fw_data + offset + 0x27e); 2973 2974 #undef be16 2975 #undef le16 2976 #undef le24 2977 } 2978 2979 static struct info_10gbt_phy_fw { 2980 unsigned int phy_fw_id; /* PCI Device ID */ 2981 char *phy_fw_file; /* /lib/firmware/ PHY Firmware file */ 2982 int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size); 2983 int phy_flash; /* Has FLASH for PHY Firmware */ 2984 } phy_info_array[] = { 2985 { 2986 PHY_AQ1202_DEVICEID, 2987 PHY_AQ1202_FIRMWARE, 2988 phy_aq1202_version, 2989 1, 2990 }, 2991 { 2992 PHY_BCM84834_DEVICEID, 2993 PHY_BCM84834_FIRMWARE, 2994 NULL, 2995 0, 2996 }, 2997 { 0, NULL, NULL }, 2998 }; 2999 3000 static struct info_10gbt_phy_fw *find_phy_info(int devid) 3001 { 3002 int i; 3003 3004 for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) { 3005 if (phy_info_array[i].phy_fw_id == devid) 3006 return &phy_info_array[i]; 3007 } 3008 return NULL; 3009 } 3010 3011 /* Handle updating of chip-external 10Gb/s-BT PHY firmware. This needs to 3012 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD. On error 3013 * we return a negative error number. If we transfer new firmware we return 1 3014 * (from t4_load_phy_fw()). If we don't do anything we return 0. 3015 */ 3016 static int adap_init0_phy(struct adapter *adap) 3017 { 3018 const struct firmware *phyf; 3019 int ret; 3020 struct info_10gbt_phy_fw *phy_info; 3021 3022 /* Use the device ID to determine which PHY file to flash. 3023 */ 3024 phy_info = find_phy_info(adap->pdev->device); 3025 if (!phy_info) { 3026 dev_warn(adap->pdev_dev, 3027 "No PHY Firmware file found for this PHY\n"); 3028 return -EOPNOTSUPP; 3029 } 3030 3031 /* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then 3032 * use that. The adapter firmware provides us with a memory buffer 3033 * where we can load a PHY firmware file from the host if we want to 3034 * override the PHY firmware File in flash. 3035 */ 3036 ret = request_firmware_direct(&phyf, phy_info->phy_fw_file, 3037 adap->pdev_dev); 3038 if (ret < 0) { 3039 /* For adapters without FLASH attached to PHY for their 3040 * firmware, it's obviously a fatal error if we can't get the 3041 * firmware to the adapter. For adapters with PHY firmware 3042 * FLASH storage, it's worth a warning if we can't find the 3043 * PHY Firmware but we'll neuter the error ... 3044 */ 3045 dev_err(adap->pdev_dev, "unable to find PHY Firmware image " 3046 "/lib/firmware/%s, error %d\n", 3047 phy_info->phy_fw_file, -ret); 3048 if (phy_info->phy_flash) { 3049 int cur_phy_fw_ver = 0; 3050 3051 t4_phy_fw_ver(adap, &cur_phy_fw_ver); 3052 dev_warn(adap->pdev_dev, "continuing with, on-adapter " 3053 "FLASH copy, version %#x\n", cur_phy_fw_ver); 3054 ret = 0; 3055 } 3056 3057 return ret; 3058 } 3059 3060 /* Load PHY Firmware onto adapter. 
3061 */ 3062 ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock, 3063 phy_info->phy_fw_version, 3064 (u8 *)phyf->data, phyf->size); 3065 if (ret < 0) 3066 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n", 3067 -ret); 3068 else if (ret > 0) { 3069 int new_phy_fw_ver = 0; 3070 3071 if (phy_info->phy_fw_version) 3072 new_phy_fw_ver = phy_info->phy_fw_version(phyf->data, 3073 phyf->size); 3074 dev_info(adap->pdev_dev, "Successfully transferred PHY " 3075 "Firmware /lib/firmware/%s, version %#x\n", 3076 phy_info->phy_fw_file, new_phy_fw_ver); 3077 } 3078 3079 release_firmware(phyf); 3080 3081 return ret; 3082 } 3083 3084 /* 3085 * Attempt to initialize the adapter via a Firmware Configuration File. 3086 */ 3087 static int adap_init0_config(struct adapter *adapter, int reset) 3088 { 3089 struct fw_caps_config_cmd caps_cmd; 3090 const struct firmware *cf; 3091 unsigned long mtype = 0, maddr = 0; 3092 u32 finiver, finicsum, cfcsum; 3093 int ret; 3094 int config_issued = 0; 3095 char *fw_config_file, fw_config_file_path[256]; 3096 char *config_name = NULL; 3097 3098 /* 3099 * Reset device if necessary. 3100 */ 3101 if (reset) { 3102 ret = t4_fw_reset(adapter, adapter->mbox, 3103 PIORSTMODE_F | PIORST_F); 3104 if (ret < 0) 3105 goto bye; 3106 } 3107 3108 /* If this is a 10Gb/s-BT adapter make sure the chip-external 3109 * 10Gb/s-BT PHYs have up-to-date firmware. Note that this step needs 3110 * to be performed after any global adapter RESET above since some 3111 * PHYs only have local RAM copies of the PHY firmware. 3112 */ 3113 if (is_10gbt_device(adapter->pdev->device)) { 3114 ret = adap_init0_phy(adapter); 3115 if (ret < 0) 3116 goto bye; 3117 } 3118 /* 3119 * If we have a T4 configuration file under /lib/firmware/cxgb4/, 3120 * then use that. Otherwise, use the configuration file stored 3121 * in the adapter flash ... 3122 */ 3123 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) { 3124 case CHELSIO_T4: 3125 fw_config_file = FW4_CFNAME; 3126 break; 3127 case CHELSIO_T5: 3128 fw_config_file = FW5_CFNAME; 3129 break; 3130 case CHELSIO_T6: 3131 fw_config_file = FW6_CFNAME; 3132 break; 3133 default: 3134 dev_err(adapter->pdev_dev, "Device %d is not supported\n", 3135 adapter->pdev->device); 3136 ret = -EINVAL; 3137 goto bye; 3138 } 3139 3140 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev); 3141 if (ret < 0) { 3142 config_name = "On FLASH"; 3143 mtype = FW_MEMTYPE_CF_FLASH; 3144 maddr = t4_flash_cfg_addr(adapter); 3145 } else { 3146 u32 params[7], val[7]; 3147 3148 sprintf(fw_config_file_path, 3149 "/lib/firmware/%s", fw_config_file); 3150 config_name = fw_config_file_path; 3151 3152 if (cf->size >= FLASH_CFG_MAX_SIZE) 3153 ret = -ENOMEM; 3154 else { 3155 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | 3156 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF)); 3157 ret = t4_query_params(adapter, adapter->mbox, 3158 adapter->pf, 0, 1, params, val); 3159 if (ret == 0) { 3160 /* 3161 * For t4_memory_rw() below addresses and 3162 * sizes have to be in terms of multiples of 4 3163 * bytes. So, if the Configuration File isn't 3164 * a multiple of 4 bytes in length we'll have 3165 * to write that out separately since we can't 3166 * guarantee that the bytes following the 3167 * residual byte in the buffer returned by 3168 * request_firmware() are zeroed out ... 
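 * For example, a 1029-byte Configuration File is written as a
 * 1028-byte bulk transfer plus one final 4-byte word that carries the
 * last data byte with its remaining 3 bytes zeroed.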
3169 */ 3170 size_t resid = cf->size & 0x3; 3171 size_t size = cf->size & ~0x3; 3172 __be32 *data = (__be32 *)cf->data; 3173 3174 mtype = FW_PARAMS_PARAM_Y_G(val[0]); 3175 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16; 3176 3177 spin_lock(&adapter->win0_lock); 3178 ret = t4_memory_rw(adapter, 0, mtype, maddr, 3179 size, data, T4_MEMORY_WRITE); 3180 if (ret == 0 && resid != 0) { 3181 union { 3182 __be32 word; 3183 char buf[4]; 3184 } last; 3185 int i; 3186 3187 last.word = data[size >> 2]; 3188 for (i = resid; i < 4; i++) 3189 last.buf[i] = 0; 3190 ret = t4_memory_rw(adapter, 0, mtype, 3191 maddr + size, 3192 4, &last.word, 3193 T4_MEMORY_WRITE); 3194 } 3195 spin_unlock(&adapter->win0_lock); 3196 } 3197 } 3198 3199 release_firmware(cf); 3200 if (ret) 3201 goto bye; 3202 } 3203 3204 /* 3205 * Issue a Capability Configuration command to the firmware to get it 3206 * to parse the Configuration File. We don't use t4_fw_config_file() 3207 * because we want the ability to modify various features after we've 3208 * processed the configuration file ... 3209 */ 3210 memset(&caps_cmd, 0, sizeof(caps_cmd)); 3211 caps_cmd.op_to_write = 3212 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | 3213 FW_CMD_REQUEST_F | 3214 FW_CMD_READ_F); 3215 caps_cmd.cfvalid_to_len16 = 3216 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F | 3217 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) | 3218 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) | 3219 FW_LEN16(caps_cmd)); 3220 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), 3221 &caps_cmd); 3222 3223 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware 3224 * Configuration File in FLASH), our last gasp effort is to use the 3225 * Firmware Configuration File which is embedded in the firmware. A 3226 * very few early versions of the firmware didn't have one embedded 3227 * but we can ignore those. 3228 */ 3229 if (ret == -ENOENT) { 3230 memset(&caps_cmd, 0, sizeof(caps_cmd)); 3231 caps_cmd.op_to_write = 3232 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | 3233 FW_CMD_REQUEST_F | 3234 FW_CMD_READ_F); 3235 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); 3236 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, 3237 sizeof(caps_cmd), &caps_cmd); 3238 config_name = "Firmware Default"; 3239 } 3240 3241 config_issued = 1; 3242 if (ret < 0) 3243 goto bye; 3244 3245 finiver = ntohl(caps_cmd.finiver); 3246 finicsum = ntohl(caps_cmd.finicsum); 3247 cfcsum = ntohl(caps_cmd.cfcsum); 3248 if (finicsum != cfcsum) 3249 dev_warn(adapter->pdev_dev, "Configuration File checksum "\ 3250 "mismatch: [fini] csum=%#x, computed csum=%#x\n", 3251 finicsum, cfcsum); 3252 3253 /* 3254 * And now tell the firmware to use the configuration we just loaded. 3255 */ 3256 caps_cmd.op_to_write = 3257 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | 3258 FW_CMD_REQUEST_F | 3259 FW_CMD_WRITE_F); 3260 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); 3261 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), 3262 NULL); 3263 if (ret < 0) 3264 goto bye; 3265 3266 /* 3267 * Tweak configuration based on system architecture, module 3268 * parameters, etc. 3269 */ 3270 ret = adap_init0_tweaks(adapter); 3271 if (ret < 0) 3272 goto bye; 3273 3274 /* 3275 * And finally tell the firmware to initialize itself using the 3276 * parameters from the Configuration File. 3277 */ 3278 ret = t4_fw_initialize(adapter, adapter->mbox); 3279 if (ret < 0) 3280 goto bye; 3281 3282 /* Emit Firmware Configuration File information and return 3283 * successfully. 
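 * (finiver, finicsum and cfcsum all come from the FW_CAPS_CONFIG_CMD
 * reply parsed above.)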
3284 */ 3285 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ 3286 "Configuration File \"%s\", version %#x, computed checksum %#x\n", 3287 config_name, finiver, cfcsum); 3288 return 0; 3289 3290 /* 3291 * Something bad happened. Return the error ... (If the "error" 3292 * is that there's no Configuration File on the adapter we don't 3293 * want to issue a warning since this is fairly common.) 3294 */ 3295 bye: 3296 if (config_issued && ret != -ENOENT) 3297 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n", 3298 config_name, -ret); 3299 return ret; 3300 } 3301 3302 static struct fw_info fw_info_array[] = { 3303 { 3304 .chip = CHELSIO_T4, 3305 .fs_name = FW4_CFNAME, 3306 .fw_mod_name = FW4_FNAME, 3307 .fw_hdr = { 3308 .chip = FW_HDR_CHIP_T4, 3309 .fw_ver = __cpu_to_be32(FW_VERSION(T4)), 3310 .intfver_nic = FW_INTFVER(T4, NIC), 3311 .intfver_vnic = FW_INTFVER(T4, VNIC), 3312 .intfver_ri = FW_INTFVER(T4, RI), 3313 .intfver_iscsi = FW_INTFVER(T4, ISCSI), 3314 .intfver_fcoe = FW_INTFVER(T4, FCOE), 3315 }, 3316 }, { 3317 .chip = CHELSIO_T5, 3318 .fs_name = FW5_CFNAME, 3319 .fw_mod_name = FW5_FNAME, 3320 .fw_hdr = { 3321 .chip = FW_HDR_CHIP_T5, 3322 .fw_ver = __cpu_to_be32(FW_VERSION(T5)), 3323 .intfver_nic = FW_INTFVER(T5, NIC), 3324 .intfver_vnic = FW_INTFVER(T5, VNIC), 3325 .intfver_ri = FW_INTFVER(T5, RI), 3326 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 3327 .intfver_fcoe = FW_INTFVER(T5, FCOE), 3328 }, 3329 }, { 3330 .chip = CHELSIO_T6, 3331 .fs_name = FW6_CFNAME, 3332 .fw_mod_name = FW6_FNAME, 3333 .fw_hdr = { 3334 .chip = FW_HDR_CHIP_T6, 3335 .fw_ver = __cpu_to_be32(FW_VERSION(T6)), 3336 .intfver_nic = FW_INTFVER(T6, NIC), 3337 .intfver_vnic = FW_INTFVER(T6, VNIC), 3338 .intfver_ofld = FW_INTFVER(T6, OFLD), 3339 .intfver_ri = FW_INTFVER(T6, RI), 3340 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU), 3341 .intfver_iscsi = FW_INTFVER(T6, ISCSI), 3342 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU), 3343 .intfver_fcoe = FW_INTFVER(T6, FCOE), 3344 }, 3345 } 3346 3347 }; 3348 3349 static struct fw_info *find_fw_info(int chip) 3350 { 3351 int i; 3352 3353 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) { 3354 if (fw_info_array[i].chip == chip) 3355 return &fw_info_array[i]; 3356 } 3357 return NULL; 3358 } 3359 3360 /* 3361 * Phase 0 of initialization: contact FW, obtain config, perform basic init. 3362 */ 3363 static int adap_init0(struct adapter *adap) 3364 { 3365 int ret; 3366 u32 v, port_vec; 3367 enum dev_state state; 3368 u32 params[7], val[7]; 3369 struct fw_caps_config_cmd caps_cmd; 3370 int reset = 1; 3371 3372 /* Grab Firmware Device Log parameters as early as possible so we have 3373 * access to it for debugging, etc. 3374 */ 3375 ret = t4_init_devlog_params(adap); 3376 if (ret < 0) 3377 return ret; 3378 3379 /* Contact FW, advertising Master capability */ 3380 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, 3381 is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state); 3382 if (ret < 0) { 3383 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", 3384 ret); 3385 return ret; 3386 } 3387 if (ret == adap->mbox) 3388 adap->flags |= MASTER_PF; 3389 3390 /* 3391 * If we're the Master PF Driver and the device is uninitialized, 3392 * then let's consider upgrading the firmware ... (We always want 3393 * to check the firmware version number in order to A. get it for 3394 * later reporting and B. to warn if the currently loaded firmware 3395 * is excessively mismatched relative to the driver.) 
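 * t4_prep_fw() below performs the actual compare-and-flash, using the
 * /lib/firmware image when the copy on the card needs to be replaced.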
3396 */ 3397 t4_get_fw_version(adap, &adap->params.fw_vers); 3398 t4_get_bs_version(adap, &adap->params.bs_vers); 3399 t4_get_tp_version(adap, &adap->params.tp_vers); 3400 t4_get_exprom_version(adap, &adap->params.er_vers); 3401 3402 ret = t4_check_fw_version(adap); 3403 /* If firmware is too old (not supported by driver) force an update. */ 3404 if (ret) 3405 state = DEV_STATE_UNINIT; 3406 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { 3407 struct fw_info *fw_info; 3408 struct fw_hdr *card_fw; 3409 const struct firmware *fw; 3410 const u8 *fw_data = NULL; 3411 unsigned int fw_size = 0; 3412 3413 /* This is the firmware whose headers the driver was compiled 3414 * against 3415 */ 3416 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); 3417 if (fw_info == NULL) { 3418 dev_err(adap->pdev_dev, 3419 "unable to get firmware info for chip %d.\n", 3420 CHELSIO_CHIP_VERSION(adap->params.chip)); 3421 return -EINVAL; 3422 } 3423 3424 /* allocate memory to read the header of the firmware on the 3425 * card 3426 */ 3427 card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL); if (!card_fw) { ret = -ENOMEM; goto bye; } 3428 3429 /* Get FW from /lib/firmware/ */ 3430 ret = request_firmware(&fw, fw_info->fw_mod_name, 3431 adap->pdev_dev); 3432 if (ret < 0) { 3433 dev_err(adap->pdev_dev, 3434 "unable to load firmware image %s, error %d\n", 3435 fw_info->fw_mod_name, ret); 3436 } else { 3437 fw_data = fw->data; 3438 fw_size = fw->size; 3439 } 3440 3441 /* upgrade FW logic */ 3442 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw, 3443 state, &reset); 3444 3445 /* Cleaning up */ 3446 release_firmware(fw); 3447 kvfree(card_fw); 3448 3449 if (ret < 0) 3450 goto bye; 3451 } 3452 3453 /* 3454 * Grab VPD parameters. This should be done after we establish a 3455 * connection to the firmware since some of the VPD parameters 3456 * (notably the Core Clock frequency) are retrieved via requests to 3457 * the firmware. On the other hand, we need these fairly early on 3458 * so we do this right after getting ahold of the firmware. 3459 */ 3460 ret = t4_get_vpd_params(adap, &adap->params.vpd); 3461 if (ret < 0) 3462 goto bye; 3463 3464 /* 3465 * Find out what ports are available to us. Note that we need to do 3466 * this before calling adap_init0_no_config() since it needs nports 3467 * and portvec ... 3468 */ 3469 v = 3470 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | 3471 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); 3472 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); 3473 if (ret < 0) 3474 goto bye; 3475 3476 adap->params.nports = hweight32(port_vec); 3477 adap->params.portvec = port_vec; 3478 3479 /* If the firmware is initialized already, emit a simple note to that 3480 * effect. Otherwise, it's time to try initializing the adapter. 3481 */ 3482 if (state == DEV_STATE_INIT) { 3483 dev_info(adap->pdev_dev, "Coming up as %s: "\ 3484 "Adapter already initialized\n", 3485 adap->flags & MASTER_PF ? "MASTER" : "SLAVE"); 3486 } else { 3487 dev_info(adap->pdev_dev, "Coming up as MASTER: "\ 3488 "Initializing adapter\n"); 3489 3490 /* Find out whether we're dealing with a version of the 3491 * firmware which has configuration file support. 3492 */ 3493 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | 3494 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF)); 3495 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, 3496 params, val); 3497 3498 /* If the firmware doesn't support Configuration Files, 3499 * return an error.
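 * (the FW_PARAMS_PARAM_DEV_CF query above fails on such firmware.)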
3500 */ 3501 if (ret < 0) { 3502 dev_err(adap->pdev_dev, "firmware doesn't support " 3503 "Firmware Configuration Files\n"); 3504 goto bye; 3505 } 3506 3507 /* The firmware provides us with a memory buffer where we can 3508 * load a Configuration File from the host if we want to 3509 * override the Configuration File in flash. 3510 */ 3511 ret = adap_init0_config(adap, reset); 3512 if (ret == -ENOENT) { 3513 dev_err(adap->pdev_dev, "no Configuration File " 3514 "present on adapter.\n"); 3515 goto bye; 3516 } 3517 if (ret < 0) { 3518 dev_err(adap->pdev_dev, "could not initialize " 3519 "adapter, error %d\n", -ret); 3520 goto bye; 3521 } 3522 } 3523 3524 /* Give the SGE code a chance to pull in anything that it needs ... 3525 * Note that this must be called after we retrieve our VPD parameters 3526 * in order to know how to convert core ticks to seconds, etc. 3527 */ 3528 ret = t4_sge_init(adap); 3529 if (ret < 0) 3530 goto bye; 3531 3532 if (is_bypass_device(adap->pdev->device)) 3533 adap->params.bypass = 1; 3534 3535 /* 3536 * Grab some of our basic fundamental operating parameters. 3537 */ 3538 #define FW_PARAM_DEV(param) \ 3539 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \ 3540 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param)) 3541 3542 #define FW_PARAM_PFVF(param) \ 3543 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \ 3544 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \ 3545 FW_PARAMS_PARAM_Y_V(0) | \ 3546 FW_PARAMS_PARAM_Z_V(0) 3547 3548 params[0] = FW_PARAM_PFVF(EQ_START); 3549 params[1] = FW_PARAM_PFVF(L2T_START); 3550 params[2] = FW_PARAM_PFVF(L2T_END); 3551 params[3] = FW_PARAM_PFVF(FILTER_START); 3552 params[4] = FW_PARAM_PFVF(FILTER_END); 3553 params[5] = FW_PARAM_PFVF(IQFLINT_START); 3554 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val); 3555 if (ret < 0) 3556 goto bye; 3557 adap->sge.egr_start = val[0]; 3558 adap->l2t_start = val[1]; 3559 adap->l2t_end = val[2]; 3560 adap->tids.ftid_base = val[3]; 3561 adap->tids.nftids = val[4] - val[3] + 1; 3562 adap->sge.ingr_start = val[5]; 3563 3564 /* qids (ingress/egress) returned from firmware can be anywhere 3565 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END. 3566 * Hence the driver needs to allocate memory for this range to 3567 * store the queue info. Get the highest IQFLINT/EQ index returned 3568 * in FW_EQ_*_CMD.alloc command. 3569 */ 3570 params[0] = FW_PARAM_PFVF(EQ_END); 3571 params[1] = FW_PARAM_PFVF(IQFLINT_END); 3572 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); 3573 if (ret < 0) 3574 goto bye; 3575 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; 3576 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1; 3577 3578 adap->sge.egr_map = kcalloc(adap->sge.egr_sz, 3579 sizeof(*adap->sge.egr_map), GFP_KERNEL); 3580 if (!adap->sge.egr_map) { 3581 ret = -ENOMEM; 3582 goto bye; 3583 } 3584 3585 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz, 3586 sizeof(*adap->sge.ingr_map), GFP_KERNEL); 3587 if (!adap->sge.ingr_map) { 3588 ret = -ENOMEM; 3589 goto bye; 3590 } 3591 3592 /* Allocate the memory for the various egress queue bitmaps 3593 * i.e. starving_fl, txq_maperr and blocked_fl.
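 * Each bitmap needs one bit per egress queue, i.e.
 * BITS_TO_LONGS(egr_sz) longs; for example, with egr_sz == 6400 and
 * 64-bit longs that is 100 longs (800 bytes) per bitmap.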
3594 */ 3595 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), 3596 sizeof(long), GFP_KERNEL); 3597 if (!adap->sge.starving_fl) { 3598 ret = -ENOMEM; 3599 goto bye; 3600 } 3601 3602 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), 3603 sizeof(long), GFP_KERNEL); 3604 if (!adap->sge.txq_maperr) { 3605 ret = -ENOMEM; 3606 goto bye; 3607 } 3608 3609 #ifdef CONFIG_DEBUG_FS 3610 adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), 3611 sizeof(long), GFP_KERNEL); 3612 if (!adap->sge.blocked_fl) { 3613 ret = -ENOMEM; 3614 goto bye; 3615 } 3616 #endif 3617 3618 params[0] = FW_PARAM_PFVF(CLIP_START); 3619 params[1] = FW_PARAM_PFVF(CLIP_END); 3620 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); 3621 if (ret < 0) 3622 goto bye; 3623 adap->clipt_start = val[0]; 3624 adap->clipt_end = val[1]; 3625 3626 /* We don't yet have a PARAMs call to retrieve the number of Traffic 3627 * Classes supported by the hardware/firmware so we hard code it here 3628 * for now. 3629 */ 3630 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16; 3631 3632 /* query params related to active filter region */ 3633 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START); 3634 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END); 3635 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); 3636 /* If the Active Filter region size is set, we enable establishing 3637 * offload connections through firmware work requests 3638 */ 3639 if ((val[0] != val[1]) && (ret >= 0)) { 3640 adap->flags |= FW_OFLD_CONN; 3641 adap->tids.aftid_base = val[0]; 3642 adap->tids.aftid_end = val[1]; 3643 } 3644 3645 /* If we're running on newer firmware, let it know that we're 3646 * prepared to deal with encapsulated CPL messages. Older 3647 * firmware won't understand this and we'll just get 3648 * unencapsulated messages ... 3649 */ 3650 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); 3651 val[0] = 1; 3652 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val); 3653 3654 /* 3655 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL 3656 * capability. Earlier versions of the firmware didn't have the 3657 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no 3658 * permission to use ULPTX MEMWRITE DSGL. 3659 */ 3660 if (is_t4(adap->params.chip)) { 3661 adap->params.ulptx_memwrite_dsgl = false; 3662 } else { 3663 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL); 3664 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 3665 1, params, val); 3666 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); 3667 } 3668 3669 /* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */ 3670 params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR); 3671 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 3672 1, params, val); 3673 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0); 3674 3675 /* 3676 * Get device capabilities so we can determine what resources we need 3677 * to manage.
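 * Each capability field in the reply (ofldcaps, rdmacaps, iscsicaps,
 * cryptocaps) gates the corresponding block of resource-range queries
 * below.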
3678 */ 3679 memset(&caps_cmd, 0, sizeof(caps_cmd)); 3680 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | 3681 FW_CMD_REQUEST_F | FW_CMD_READ_F); 3682 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); 3683 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), 3684 &caps_cmd); 3685 if (ret < 0) 3686 goto bye; 3687 3688 if (caps_cmd.ofldcaps) { 3689 /* query offload-related parameters */ 3690 params[0] = FW_PARAM_DEV(NTID); 3691 params[1] = FW_PARAM_PFVF(SERVER_START); 3692 params[2] = FW_PARAM_PFVF(SERVER_END); 3693 params[3] = FW_PARAM_PFVF(TDDP_START); 3694 params[4] = FW_PARAM_PFVF(TDDP_END); 3695 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3696 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, 3697 params, val); 3698 if (ret < 0) 3699 goto bye; 3700 adap->tids.ntids = val[0]; 3701 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); 3702 adap->tids.stid_base = val[1]; 3703 adap->tids.nstids = val[2] - val[1] + 1; 3704 /* 3705 * Setup server filter region. Divide the available filter 3706 * region into two parts. Regular filters get 1/3rd and server 3707 * filters get 2/3rd part. This is only enabled if the workaround 3708 * path is enabled. 3709 * 1. For regular filters. 3710 * 2. Server filters: These are special filters which are used 3711 * to redirect SYN packets to the offload queue. 3712 */ 3713 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) { 3714 adap->tids.sftid_base = adap->tids.ftid_base + 3715 DIV_ROUND_UP(adap->tids.nftids, 3); 3716 adap->tids.nsftids = adap->tids.nftids - 3717 DIV_ROUND_UP(adap->tids.nftids, 3); 3718 adap->tids.nftids = adap->tids.sftid_base - 3719 adap->tids.ftid_base; 3720 } 3721 adap->vres.ddp.start = val[3]; 3722 adap->vres.ddp.size = val[4] - val[3] + 1; 3723 adap->params.ofldq_wr_cred = val[5]; 3724 3725 adap->params.offload = 1; 3726 adap->num_ofld_uld += 1; 3727 } 3728 if (caps_cmd.rdmacaps) { 3729 params[0] = FW_PARAM_PFVF(STAG_START); 3730 params[1] = FW_PARAM_PFVF(STAG_END); 3731 params[2] = FW_PARAM_PFVF(RQ_START); 3732 params[3] = FW_PARAM_PFVF(RQ_END); 3733 params[4] = FW_PARAM_PFVF(PBL_START); 3734 params[5] = FW_PARAM_PFVF(PBL_END); 3735 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, 3736 params, val); 3737 if (ret < 0) 3738 goto bye; 3739 adap->vres.stag.start = val[0]; 3740 adap->vres.stag.size = val[1] - val[0] + 1; 3741 adap->vres.rq.start = val[2]; 3742 adap->vres.rq.size = val[3] - val[2] + 1; 3743 adap->vres.pbl.start = val[4]; 3744 adap->vres.pbl.size = val[5] - val[4] + 1; 3745 3746 params[0] = FW_PARAM_PFVF(SQRQ_START); 3747 params[1] = FW_PARAM_PFVF(SQRQ_END); 3748 params[2] = FW_PARAM_PFVF(CQ_START); 3749 params[3] = FW_PARAM_PFVF(CQ_END); 3750 params[4] = FW_PARAM_PFVF(OCQ_START); 3751 params[5] = FW_PARAM_PFVF(OCQ_END); 3752 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, 3753 val); 3754 if (ret < 0) 3755 goto bye; 3756 adap->vres.qp.start = val[0]; 3757 adap->vres.qp.size = val[1] - val[0] + 1; 3758 adap->vres.cq.start = val[2]; 3759 adap->vres.cq.size = val[3] - val[2] + 1; 3760 adap->vres.ocq.start = val[4]; 3761 adap->vres.ocq.size = val[5] - val[4] + 1; 3762 3763 params[0] = FW_PARAM_DEV(MAXORDIRD_QP); 3764 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER); 3765 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, 3766 val); 3767 if (ret < 0) { 3768 adap->params.max_ordird_qp = 8; 3769 adap->params.max_ird_adapter = 32 * adap->tids.ntids; 3770 ret = 0; 3771 } else { 3772 adap->params.max_ordird_qp = val[0]; 3773 adap->params.max_ird_adapter = val[1]; 3774 }
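/* Report the negotiated (or defaulted) IRD/ORD limits; the RDMA ULD
 * consumes these when it attaches.
 */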
dev_info(adap->pdev_dev, 3776 "max_ordird_qp %d max_ird_adapter %d\n", 3777 adap->params.max_ordird_qp, 3778 adap->params.max_ird_adapter); 3779 adap->num_ofld_uld += 2; 3780 } 3781 if (caps_cmd.iscsicaps) { 3782 params[0] = FW_PARAM_PFVF(ISCSI_START); 3783 params[1] = FW_PARAM_PFVF(ISCSI_END); 3784 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, 3785 params, val); 3786 if (ret < 0) 3787 goto bye; 3788 adap->vres.iscsi.start = val[0]; 3789 adap->vres.iscsi.size = val[1] - val[0] + 1; 3790 /* LIO target and cxgb4i initiator */ 3791 adap->num_ofld_uld += 2; 3792 } 3793 if (caps_cmd.cryptocaps) { 3794 /* Should query params here...TODO */ 3795 params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE); 3796 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, 3797 params, val); 3798 if (ret < 0) { 3799 if (ret != -EINVAL) 3800 goto bye; 3801 } else { 3802 adap->vres.ncrypto_fc = val[0]; 3803 } 3804 adap->params.crypto |= ULP_CRYPTO_LOOKASIDE; 3805 adap->num_uld += 1; 3806 } 3807 #undef FW_PARAM_PFVF 3808 #undef FW_PARAM_DEV 3809 3810 /* The MTU/MSS Table is initialized by now, so load their values. If 3811 * we're initializing the adapter, then we'll make any modifications 3812 * we want to the MTU/MSS Table and also initialize the congestion 3813 * parameters. 3814 */ 3815 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); 3816 if (state != DEV_STATE_INIT) { 3817 int i; 3818 3819 /* The default MTU Table contains values 1492 and 1500. 3820 * However, for TCP, it's better to have two values which are 3821 * a multiple of 8 +/- 4 bytes apart near this popular MTU. 3822 * This allows us to have a TCP Data Payload which is a 3823 * multiple of 8 regardless of what combination of TCP Options 3824 * are in use (always a multiple of 4 bytes) which is 3825 * important for performance reasons. For instance, if no 3826 * options are in use, then we have a 20-byte IP header and a 3827 * 20-byte TCP header. In this case, a 1500-byte MTU would 3828 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes 3829 * which is not a multiple of 8. So using an MTU of 1488 in 3830 * this case results in a TCP Data Payload of 1448 bytes which 3831 * is a multiple of 8. On the other hand, if 12-byte TCP Time 3832 * Stamps have been negotiated, then an MTU of 1500 bytes 3833 * results in a TCP Data Payload of 1448 bytes which, as 3834 * above, is a multiple of 8 bytes ... 3835 */ 3836 for (i = 0; i < NMTUS; i++) 3837 if (adap->params.mtus[i] == 1492) { 3838 adap->params.mtus[i] = 1488; 3839 break; 3840 } 3841 3842 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, 3843 adap->params.b_wnd); 3844 } 3845 t4_init_sge_params(adap); 3846 adap->flags |= FW_OK; 3847 t4_init_tp_params(adap); 3848 return 0; 3849 3850 /* 3851 * Something bad happened. If a command timed out or failed with EIO, 3852 * FW does not operate within its spec or something catastrophic 3853 * happened to HW/FW, stop issuing commands.
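 * (hence the check below that skips t4_fw_bye() for -ETIMEDOUT and
 * -EIO)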
3854 */ 3855 bye: 3856 kfree(adap->sge.egr_map); 3857 kfree(adap->sge.ingr_map); 3858 kfree(adap->sge.starving_fl); 3859 kfree(adap->sge.txq_maperr); 3860 #ifdef CONFIG_DEBUG_FS 3861 kfree(adap->sge.blocked_fl); 3862 #endif 3863 if (ret != -ETIMEDOUT && ret != -EIO) 3864 t4_fw_bye(adap, adap->mbox); 3865 return ret; 3866 } 3867 3868 /* EEH callbacks */ 3869 3870 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev, 3871 pci_channel_state_t state) 3872 { 3873 int i; 3874 struct adapter *adap = pci_get_drvdata(pdev); 3875 3876 if (!adap) 3877 goto out; 3878 3879 rtnl_lock(); 3880 adap->flags &= ~FW_OK; 3881 notify_ulds(adap, CXGB4_STATE_START_RECOVERY); 3882 spin_lock(&adap->stats_lock); 3883 for_each_port(adap, i) { 3884 struct net_device *dev = adap->port[i]; 3885 3886 netif_device_detach(dev); 3887 netif_carrier_off(dev); 3888 } 3889 spin_unlock(&adap->stats_lock); 3890 disable_interrupts(adap); 3891 if (adap->flags & FULL_INIT_DONE) 3892 cxgb_down(adap); 3893 rtnl_unlock(); 3894 if ((adap->flags & DEV_ENABLED)) { 3895 pci_disable_device(pdev); 3896 adap->flags &= ~DEV_ENABLED; 3897 } 3898 out: return state == pci_channel_io_perm_failure ? 3899 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; 3900 } 3901 3902 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) 3903 { 3904 int i, ret; 3905 struct fw_caps_config_cmd c; 3906 struct adapter *adap = pci_get_drvdata(pdev); 3907 3908 if (!adap) { 3909 pci_restore_state(pdev); 3910 pci_save_state(pdev); 3911 return PCI_ERS_RESULT_RECOVERED; 3912 } 3913 3914 if (!(adap->flags & DEV_ENABLED)) { 3915 if (pci_enable_device(pdev)) { 3916 dev_err(&pdev->dev, "Cannot reenable PCI " 3917 "device after reset\n"); 3918 return PCI_ERS_RESULT_DISCONNECT; 3919 } 3920 adap->flags |= DEV_ENABLED; 3921 } 3922 3923 pci_set_master(pdev); 3924 pci_restore_state(pdev); 3925 pci_save_state(pdev); 3926 pci_cleanup_aer_uncorrect_error_status(pdev); 3927 3928 if (t4_wait_dev_ready(adap->regs) < 0) 3929 return PCI_ERS_RESULT_DISCONNECT; 3930 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0) 3931 return PCI_ERS_RESULT_DISCONNECT; 3932 adap->flags |= FW_OK; 3933 if (adap_init1(adap, &c)) 3934 return PCI_ERS_RESULT_DISCONNECT; 3935 3936 for_each_port(adap, i) { 3937 struct port_info *p = adap2pinfo(adap, i); 3938 3939 ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1, 3940 NULL, NULL); 3941 if (ret < 0) 3942 return PCI_ERS_RESULT_DISCONNECT; 3943 p->viid = ret; 3944 p->xact_addr_filt = -1; 3945 } 3946 3947 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, 3948 adap->params.b_wnd); 3949 setup_memwin(adap); 3950 if (cxgb_up(adap)) 3951 return PCI_ERS_RESULT_DISCONNECT; 3952 return PCI_ERS_RESULT_RECOVERED; 3953 } 3954 3955 static void eeh_resume(struct pci_dev *pdev) 3956 { 3957 int i; 3958 struct adapter *adap = pci_get_drvdata(pdev); 3959 3960 if (!adap) 3961 return; 3962 3963 rtnl_lock(); 3964 for_each_port(adap, i) { 3965 struct net_device *dev = adap->port[i]; 3966 3967 if (netif_running(dev)) { 3968 link_start(dev); 3969 cxgb_set_rxmode(dev); 3970 } 3971 netif_device_attach(dev); 3972 } 3973 rtnl_unlock(); 3974 } 3975 3976 static const struct pci_error_handlers cxgb4_eeh = { 3977 .error_detected = eeh_err_detected, 3978 .slot_reset = eeh_slot_reset, 3979 .resume = eeh_resume, 3980 }; 3981 3982 /* Return true if the Link Configuration supports "High Speeds" (those greater 3983 * than 1Gb/s). 
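 *
 * For example, a port advertising 1G|10G|40G in lc->supported yields
 * high_speeds = 10G|40G once 100M and 1G are masked off, so it counts as a
 * "10G or better" port for the queue sizing below.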
 */
static inline bool is_x_10g_port(const struct link_config *lc)
{
	unsigned int speeds, high_speeds;

	speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
	high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);

	return high_speeds != 0;
}

/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs. Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i = 0, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
	int q10g = 0;
#endif

	/* Reduce memory usage in kdump environment by disabling all offload.
	 */
	if (is_kdump_kernel()) {
		adap->params.offload = 0;
		adap->params.crypto = 0;
	} else if (is_uld(adap) && t4_uld_mem_alloc(adap)) {
		adap->params.offload = 0;
		adap->params.crypto = 0;
	}

	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
			MAX_ETH_QSETS, adap->params.nports * 8);
		BUG_ON(1);
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = 8;
		qidx += pi->nqsets;
	}
#else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
#endif /* !CONFIG_CHELSIO_T4_DCB */

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;	/* MSI-X may lower it later */

	if (is_uld(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to
		 * 1G, otherwise we divide all available queues amongst the
		 * channels capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else {
			s->ofldqsets = adap->params.nports;
		}
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 512, 64);
}

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
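 *
 * (Queues are trimmed round-robin, at most one per port per pass, from ports
 * that still have more than one queue set, so the reduction stays as even as
 * possible across ports; first_qset offsets are recomputed once the target
 * is reached.)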
4093 */ 4094 static void reduce_ethqs(struct adapter *adap, int n) 4095 { 4096 int i; 4097 struct port_info *pi; 4098 4099 while (n < adap->sge.ethqsets) 4100 for_each_port(adap, i) { 4101 pi = adap2pinfo(adap, i); 4102 if (pi->nqsets > 1) { 4103 pi->nqsets--; 4104 adap->sge.ethqsets--; 4105 if (adap->sge.ethqsets <= n) 4106 break; 4107 } 4108 } 4109 4110 n = 0; 4111 for_each_port(adap, i) { 4112 pi = adap2pinfo(adap, i); 4113 pi->first_qset = n; 4114 n += pi->nqsets; 4115 } 4116 } 4117 4118 static int get_msix_info(struct adapter *adap) 4119 { 4120 struct uld_msix_info *msix_info; 4121 unsigned int max_ingq = 0; 4122 4123 if (is_offload(adap)) 4124 max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld; 4125 if (is_pci_uld(adap)) 4126 max_ingq += MAX_OFLD_QSETS * adap->num_uld; 4127 4128 if (!max_ingq) 4129 goto out; 4130 4131 msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL); 4132 if (!msix_info) 4133 return -ENOMEM; 4134 4135 adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq), 4136 sizeof(long), GFP_KERNEL); 4137 if (!adap->msix_bmap_ulds.msix_bmap) { 4138 kfree(msix_info); 4139 return -ENOMEM; 4140 } 4141 spin_lock_init(&adap->msix_bmap_ulds.lock); 4142 adap->msix_info_ulds = msix_info; 4143 out: 4144 return 0; 4145 } 4146 4147 static void free_msix_info(struct adapter *adap) 4148 { 4149 if (!(adap->num_uld && adap->num_ofld_uld)) 4150 return; 4151 4152 kfree(adap->msix_info_ulds); 4153 kfree(adap->msix_bmap_ulds.msix_bmap); 4154 } 4155 4156 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ 4157 #define EXTRA_VECS 2 4158 4159 static int enable_msix(struct adapter *adap) 4160 { 4161 int ofld_need = 0, uld_need = 0; 4162 int i, j, want, need, allocated; 4163 struct sge *s = &adap->sge; 4164 unsigned int nchan = adap->params.nports; 4165 struct msix_entry *entries; 4166 int max_ingq = MAX_INGQ; 4167 4168 if (is_pci_uld(adap)) 4169 max_ingq += (MAX_OFLD_QSETS * adap->num_uld); 4170 if (is_offload(adap)) 4171 max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld); 4172 entries = kmalloc(sizeof(*entries) * (max_ingq + 1), 4173 GFP_KERNEL); 4174 if (!entries) 4175 return -ENOMEM; 4176 4177 /* map for msix */ 4178 if (get_msix_info(adap)) { 4179 adap->params.offload = 0; 4180 adap->params.crypto = 0; 4181 } 4182 4183 for (i = 0; i < max_ingq + 1; ++i) 4184 entries[i].entry = i; 4185 4186 want = s->max_ethqsets + EXTRA_VECS; 4187 if (is_offload(adap)) { 4188 want += adap->num_ofld_uld * s->ofldqsets; 4189 ofld_need = adap->num_ofld_uld * nchan; 4190 } 4191 if (is_pci_uld(adap)) { 4192 want += adap->num_uld * s->ofldqsets; 4193 uld_need = adap->num_uld * nchan; 4194 } 4195 #ifdef CONFIG_CHELSIO_T4_DCB 4196 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for 4197 * each port. 4198 */ 4199 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need; 4200 #else 4201 need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need; 4202 #endif 4203 allocated = pci_enable_msix_range(adap->pdev, entries, need, want); 4204 if (allocated < 0) { 4205 dev_info(adap->pdev_dev, "not enough MSI-X vectors left," 4206 " not using MSI-X\n"); 4207 kfree(entries); 4208 return allocated; 4209 } 4210 4211 /* Distribute available vectors to the various queue groups. 4212 * Every group gets its minimum requirement and NIC gets top 4213 * priority for leftovers. 
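 *
 * Worked example with hypothetical numbers: 2 ports, EXTRA_VECS = 2,
 * ofld_need = 2, uld_need = 0 and allocated = 20 leaves
 * 20 - 2 - 2 - 0 = 16 vectors for the NIC group; if max_ethqsets was 32 it
 * is trimmed to 16 and reduce_ethqs() rebalances the per-port queue counts.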
4214 */ 4215 i = allocated - EXTRA_VECS - ofld_need - uld_need; 4216 if (i < s->max_ethqsets) { 4217 s->max_ethqsets = i; 4218 if (i < s->ethqsets) 4219 reduce_ethqs(adap, i); 4220 } 4221 if (is_uld(adap)) { 4222 if (allocated < want) 4223 s->nqs_per_uld = nchan; 4224 else 4225 s->nqs_per_uld = s->ofldqsets; 4226 } 4227 4228 for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i) 4229 adap->msix_info[i].vec = entries[i].vector; 4230 if (is_uld(adap)) { 4231 for (j = 0 ; i < allocated; ++i, j++) { 4232 adap->msix_info_ulds[j].vec = entries[i].vector; 4233 adap->msix_info_ulds[j].idx = i; 4234 } 4235 adap->msix_bmap_ulds.mapsize = j; 4236 } 4237 dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, " 4238 "nic %d per uld %d\n", 4239 allocated, s->max_ethqsets, s->nqs_per_uld); 4240 4241 kfree(entries); 4242 return 0; 4243 } 4244 4245 #undef EXTRA_VECS 4246 4247 static int init_rss(struct adapter *adap) 4248 { 4249 unsigned int i; 4250 int err; 4251 4252 err = t4_init_rss_mode(adap, adap->mbox); 4253 if (err) 4254 return err; 4255 4256 for_each_port(adap, i) { 4257 struct port_info *pi = adap2pinfo(adap, i); 4258 4259 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL); 4260 if (!pi->rss) 4261 return -ENOMEM; 4262 } 4263 return 0; 4264 } 4265 4266 static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap, 4267 enum pci_bus_speed *speed, 4268 enum pcie_link_width *width) 4269 { 4270 u32 lnkcap1, lnkcap2; 4271 int err1, err2; 4272 4273 #define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */ 4274 4275 *speed = PCI_SPEED_UNKNOWN; 4276 *width = PCIE_LNK_WIDTH_UNKNOWN; 4277 4278 err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP, 4279 &lnkcap1); 4280 err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2, 4281 &lnkcap2); 4282 if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ 4283 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) 4284 *speed = PCIE_SPEED_8_0GT; 4285 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) 4286 *speed = PCIE_SPEED_5_0GT; 4287 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) 4288 *speed = PCIE_SPEED_2_5GT; 4289 } 4290 if (!err1) { 4291 *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT; 4292 if (!lnkcap2) { /* pre-r3.0 */ 4293 if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB) 4294 *speed = PCIE_SPEED_5_0GT; 4295 else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB) 4296 *speed = PCIE_SPEED_2_5GT; 4297 } 4298 } 4299 4300 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) 4301 return err1 ? err1 : err2 ? err2 : -EINVAL; 4302 return 0; 4303 } 4304 4305 static void cxgb4_check_pcie_caps(struct adapter *adap) 4306 { 4307 enum pcie_link_width width, width_cap; 4308 enum pci_bus_speed speed, speed_cap; 4309 4310 #define PCIE_SPEED_STR(speed) \ 4311 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \ 4312 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \ 4313 speed == PCIE_SPEED_2_5GT ? 
"2.5GT/s" : \ 4314 "Unknown") 4315 4316 if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) { 4317 dev_warn(adap->pdev_dev, 4318 "Unable to determine PCIe device BW capabilities\n"); 4319 return; 4320 } 4321 4322 if (pcie_get_minimum_link(adap->pdev, &speed, &width) || 4323 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) { 4324 dev_warn(adap->pdev_dev, 4325 "Unable to determine PCI Express bandwidth.\n"); 4326 return; 4327 } 4328 4329 dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n", 4330 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap)); 4331 dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n", 4332 width, width_cap); 4333 if (speed < speed_cap || width < width_cap) 4334 dev_info(adap->pdev_dev, 4335 "A slot with more lanes and/or higher speed is " 4336 "suggested for optimal performance.\n"); 4337 } 4338 4339 /* Dump basic information about the adapter */ 4340 static void print_adapter_info(struct adapter *adapter) 4341 { 4342 /* Device information */ 4343 dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n", 4344 adapter->params.vpd.id, 4345 CHELSIO_CHIP_RELEASE(adapter->params.chip)); 4346 dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n", 4347 adapter->params.vpd.sn, adapter->params.vpd.pn); 4348 4349 /* Firmware Version */ 4350 if (!adapter->params.fw_vers) 4351 dev_warn(adapter->pdev_dev, "No firmware loaded\n"); 4352 else 4353 dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n", 4354 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), 4355 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers), 4356 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers), 4357 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers)); 4358 4359 /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap 4360 * Firmware, so dev_info() is more appropriate here.) 4361 */ 4362 if (!adapter->params.bs_vers) 4363 dev_info(adapter->pdev_dev, "No bootstrap loaded\n"); 4364 else 4365 dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n", 4366 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers), 4367 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers), 4368 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers), 4369 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers)); 4370 4371 /* TP Microcode Version */ 4372 if (!adapter->params.tp_vers) 4373 dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n"); 4374 else 4375 dev_info(adapter->pdev_dev, 4376 "TP Microcode version: %u.%u.%u.%u\n", 4377 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers), 4378 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), 4379 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), 4380 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); 4381 4382 /* Expansion ROM version */ 4383 if (!adapter->params.er_vers) 4384 dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n"); 4385 else 4386 dev_info(adapter->pdev_dev, 4387 "Expansion ROM version: %u.%u.%u.%u\n", 4388 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers), 4389 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers), 4390 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers), 4391 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers)); 4392 4393 /* Software/Hardware configuration */ 4394 dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n", 4395 is_offload(adapter) ? "R" : "", 4396 ((adapter->flags & USING_MSIX) ? "MSI-X" : 4397 (adapter->flags & USING_MSI) ? "MSI" : ""), 4398 is_offload(adapter) ? 
"Offload" : "non-Offload"); 4399 } 4400 4401 static void print_port_info(const struct net_device *dev) 4402 { 4403 char buf[80]; 4404 char *bufp = buf; 4405 const char *spd = ""; 4406 const struct port_info *pi = netdev_priv(dev); 4407 const struct adapter *adap = pi->adapter; 4408 4409 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB) 4410 spd = " 2.5 GT/s"; 4411 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB) 4412 spd = " 5 GT/s"; 4413 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB) 4414 spd = " 8 GT/s"; 4415 4416 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) 4417 bufp += sprintf(bufp, "100M/"); 4418 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) 4419 bufp += sprintf(bufp, "1G/"); 4420 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) 4421 bufp += sprintf(bufp, "10G/"); 4422 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G) 4423 bufp += sprintf(bufp, "25G/"); 4424 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) 4425 bufp += sprintf(bufp, "40G/"); 4426 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G) 4427 bufp += sprintf(bufp, "100G/"); 4428 if (bufp != buf) 4429 --bufp; 4430 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); 4431 4432 netdev_info(dev, "%s: Chelsio %s (%s) %s\n", 4433 dev->name, adap->params.vpd.id, adap->name, buf); 4434 } 4435 4436 static void enable_pcie_relaxed_ordering(struct pci_dev *dev) 4437 { 4438 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); 4439 } 4440 4441 /* 4442 * Free the following resources: 4443 * - memory used for tables 4444 * - MSI/MSI-X 4445 * - net devices 4446 * - resources FW is holding for us 4447 */ 4448 static void free_some_resources(struct adapter *adapter) 4449 { 4450 unsigned int i; 4451 4452 kvfree(adapter->l2t); 4453 t4_cleanup_sched(adapter); 4454 kvfree(adapter->tids.tid_tab); 4455 cxgb4_cleanup_tc_u32(adapter); 4456 kfree(adapter->sge.egr_map); 4457 kfree(adapter->sge.ingr_map); 4458 kfree(adapter->sge.starving_fl); 4459 kfree(adapter->sge.txq_maperr); 4460 #ifdef CONFIG_DEBUG_FS 4461 kfree(adapter->sge.blocked_fl); 4462 #endif 4463 disable_msi(adapter); 4464 4465 for_each_port(adapter, i) 4466 if (adapter->port[i]) { 4467 struct port_info *pi = adap2pinfo(adapter, i); 4468 4469 if (pi->viid != 0) 4470 t4_free_vi(adapter, adapter->mbox, adapter->pf, 4471 0, pi->viid); 4472 kfree(adap2pinfo(adapter, i)->rss); 4473 free_netdev(adapter->port[i]); 4474 } 4475 if (adapter->flags & FW_OK) 4476 t4_fw_bye(adapter, adapter->pf); 4477 } 4478 4479 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) 4480 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ 4481 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) 4482 #define SEGMENT_SIZE 128 4483 4484 static int get_chip_type(struct pci_dev *pdev, u32 pl_rev) 4485 { 4486 u16 device_id; 4487 4488 /* Retrieve adapter's device ID */ 4489 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id); 4490 4491 switch (device_id >> 12) { 4492 case CHELSIO_T4: 4493 return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev); 4494 case CHELSIO_T5: 4495 return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev); 4496 case CHELSIO_T6: 4497 return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev); 4498 default: 4499 dev_err(&pdev->dev, "Device %d is not supported\n", 4500 device_id); 4501 } 4502 return -EINVAL; 4503 } 4504 4505 #ifdef CONFIG_PCI_IOV 4506 static void dummy_setup(struct net_device *dev) 4507 { 4508 dev->type = ARPHRD_NONE; 4509 dev->mtu = 0; 4510 dev->hard_header_len = 0; 4511 dev->addr_len = 0; 4512 dev->tx_queue_len = 0; 4513 
	dev->flags |= IFF_NOARP;
	dev->priv_flags |= IFF_NO_QUEUE;

	/* Initialize the device structure. */
	dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
	dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
	dev->destructor = free_netdev;
}

static int config_mgmt_dev(struct pci_dev *pdev)
{
	struct adapter *adap = pci_get_drvdata(pdev);
	struct net_device *netdev;
	struct port_info *pi;
	char name[IFNAMSIZ];
	int err;

	snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf);
	netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN,
			      dummy_setup);
	if (!netdev)
		return -ENOMEM;

	pi = netdev_priv(netdev);
	pi->adapter = adap;
	pi->port_id = adap->pf % adap->params.nports;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	adap->port[0] = netdev;

	err = register_netdev(adap->port[0]);
	if (err) {
		pr_info("Unable to register VF mgmt netdev %s\n", name);
		free_netdev(adap->port[0]);
		adap->port[0] = NULL;
		return err;
	}
	return 0;
}

static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct adapter *adap = pci_get_drvdata(pdev);
	int err = 0;
	int current_vfs = pci_num_vf(pdev);
	u32 pcie_fw;

	pcie_fw = readl(adap->regs + PCIE_FW_A);
	/* Check if cxgb4 is the MASTER and fw is initialized */
	if (!(pcie_fw & PCIE_FW_INIT_F) ||
	    !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
	    PCIE_FW_MASTER_G(pcie_fw) != 4) {
		dev_warn(&pdev->dev,
			 "cxgb4 driver needs to be MASTER to support SRIOV\n");
		return -EOPNOTSUPP;
	}

	/* If any of the VFs is already assigned to a guest OS, then
	 * SR-IOV for the device cannot be modified.
	 */
	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
		return num_vfs;
	}

	/* Disable SRIOV when zero is passed.
	 * One needs to disable SRIOV before modifying it, else
	 * stack throws the below warning:
	 * " 'n' VFs already enabled. Disable before enabling 'm' VFs."
	 */
	if (!num_vfs) {
		pci_disable_sriov(pdev);
		if (adap->port[0]) {
			unregister_netdev(adap->port[0]);
			adap->port[0] = NULL;
		}
		/* free VF resources */
		kfree(adap->vfinfo);
		adap->vfinfo = NULL;
		adap->num_vfs = 0;
		return num_vfs;
	}

	if (num_vfs != current_vfs) {
		err = pci_enable_sriov(pdev, num_vfs);
		if (err)
			return err;

		adap->num_vfs = num_vfs;
		err = config_mgmt_dev(pdev);
		if (err)
			return err;
	}

	adap->vfinfo = kcalloc(adap->num_vfs,
			       sizeof(struct vf_info), GFP_KERNEL);
	if (adap->vfinfo)
		fill_vf_station_mac_addr(adap);
	return num_vfs;
}
#endif

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
	struct net_device *netdev;
	void __iomem *regs;
	u32 whoami, pl_rev;
	enum chip_type chip;
	static int adap_idx = 1;
#ifdef CONFIG_PCI_IOV
	u32 v, port_vec;
#endif

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info; some other driver may have claimed the device.
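		 * (dev_info() rather than dev_err() keeps the log quiet in
		 * that common case; the probe still fails with the original
		 * error code.)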
*/ 4637 dev_info(&pdev->dev, "cannot obtain PCI resources\n"); 4638 return err; 4639 } 4640 4641 err = pci_enable_device(pdev); 4642 if (err) { 4643 dev_err(&pdev->dev, "cannot enable PCI device\n"); 4644 goto out_release_regions; 4645 } 4646 4647 regs = pci_ioremap_bar(pdev, 0); 4648 if (!regs) { 4649 dev_err(&pdev->dev, "cannot map device registers\n"); 4650 err = -ENOMEM; 4651 goto out_disable_device; 4652 } 4653 4654 err = t4_wait_dev_ready(regs); 4655 if (err < 0) 4656 goto out_unmap_bar0; 4657 4658 /* We control everything through one PF */ 4659 whoami = readl(regs + PL_WHOAMI_A); 4660 pl_rev = REV_G(readl(regs + PL_REV_A)); 4661 chip = get_chip_type(pdev, pl_rev); 4662 func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ? 4663 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami); 4664 if (func != ent->driver_data) { 4665 #ifndef CONFIG_PCI_IOV 4666 iounmap(regs); 4667 #endif 4668 pci_disable_device(pdev); 4669 pci_save_state(pdev); /* to restore SR-IOV later */ 4670 goto sriov; 4671 } 4672 4673 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 4674 highdma = true; 4675 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 4676 if (err) { 4677 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " 4678 "coherent allocations\n"); 4679 goto out_unmap_bar0; 4680 } 4681 } else { 4682 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 4683 if (err) { 4684 dev_err(&pdev->dev, "no usable DMA configuration\n"); 4685 goto out_unmap_bar0; 4686 } 4687 } 4688 4689 pci_enable_pcie_error_reporting(pdev); 4690 enable_pcie_relaxed_ordering(pdev); 4691 pci_set_master(pdev); 4692 pci_save_state(pdev); 4693 4694 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 4695 if (!adapter) { 4696 err = -ENOMEM; 4697 goto out_unmap_bar0; 4698 } 4699 adap_idx++; 4700 4701 adapter->workq = create_singlethread_workqueue("cxgb4"); 4702 if (!adapter->workq) { 4703 err = -ENOMEM; 4704 goto out_free_adapter; 4705 } 4706 4707 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + 4708 (sizeof(struct mbox_cmd) * 4709 T4_OS_LOG_MBOX_CMDS), 4710 GFP_KERNEL); 4711 if (!adapter->mbox_log) { 4712 err = -ENOMEM; 4713 goto out_free_adapter; 4714 } 4715 adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS; 4716 4717 /* PCI device has been enabled */ 4718 adapter->flags |= DEV_ENABLED; 4719 4720 adapter->regs = regs; 4721 adapter->pdev = pdev; 4722 adapter->pdev_dev = &pdev->dev; 4723 adapter->name = pci_name(pdev); 4724 adapter->mbox = func; 4725 adapter->pf = func; 4726 adapter->msg_enable = DFLT_MSG_ENABLE; 4727 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); 4728 4729 spin_lock_init(&adapter->stats_lock); 4730 spin_lock_init(&adapter->tid_release_lock); 4731 spin_lock_init(&adapter->win0_lock); 4732 spin_lock_init(&adapter->mbox_lock); 4733 4734 INIT_LIST_HEAD(&adapter->mlist.list); 4735 4736 INIT_WORK(&adapter->tid_release_task, process_tid_release_list); 4737 INIT_WORK(&adapter->db_full_task, process_db_full); 4738 INIT_WORK(&adapter->db_drop_task, process_db_drop); 4739 4740 err = t4_prep_adapter(adapter); 4741 if (err) 4742 goto out_free_adapter; 4743 4744 4745 if (!is_t4(adapter->params.chip)) { 4746 s_qpp = (QUEUESPERPAGEPF0_S + 4747 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * 4748 adapter->pf); 4749 qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter, 4750 SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp); 4751 num_seg = PAGE_SIZE / SEGMENT_SIZE; 4752 4753 /* Each segment size is 128B. 
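		 * (With a 4KB PAGE_SIZE, for example, num_seg works out to
		 * 4096 / 128 = 32 segments per page.)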
		 * Write coalescing is enabled only when the
		 * SGE_EGRESS_QUEUES_PER_PAGE_PF register value for the queue
		 * is less than the number of segments that can be
		 * accommodated in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
			pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
#ifdef CONFIG_DEBUG_FS
	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	/* configure SGE_STAT_CFG_A to read WC stats */
	if (!is_t4(adapter->params.chip))
		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
			     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
			      T6_STATMODE_V(0)));

	for_each_port(adapter, i) {
		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_TC;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		/* MTU range: 81 - 9600 */
		netdev->min_mtu = 81;
		netdev->max_mtu = MAX_MTU;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		cxgb4_set_ethtool_ops(netdev);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	} else if (adapter->params.nports == 1) {
		/* If we don't have a connection to the firmware -- possibly
		 * because of an error -- grab the raw VPD parameters so we
		 * can set the proper MAC Address on the debug network
		 * interface that we've created.
		 */
		u8 hw_addr[ETH_ALEN];
		u8 *na = adapter->params.vpd.na;

		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
		if (!err) {
			for (i = 0; i < ETH_ALEN; i++)
				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
					      hex2val(na[2 * i + 1]));
			t4_set_hw_addr(adapter, 0, hw_addr);
		}
	}

	/* Configure queues and allocate tables now; they can be needed as
	 * soon as the first register_netdev completes.
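	 * (A registered netdev can be opened by userspace immediately, so
	 * the queue counts chosen by cfg_queues() must already be final.)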
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) &&
	    (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
		/* CLIP functionality is not present in hardware,
		 * hence disable all offload features
		 */
		dev_warn(&pdev->dev,
			 "CLIP not enabled in hardware, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
						  adapter->clipt_end);
		if (!adapter->clipt) {
			/* We tolerate a lack of clip_table, giving up
			 * some functionality
			 */
			dev_warn(&pdev->dev,
				 "could not allocate Clip table, continuing\n");
			adapter->params.offload = 0;
		}
	}
#endif

	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
		if (!pi->sched_tbl)
			dev_warn(&pdev->dev,
				 "could not activate scheduling on port %d\n",
				 i);
	}

	if (tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
		if (!adapter->tc_u32)
			dev_warn(&pdev->dev,
				 "could not offload tc u32, continuing\n");
	}

	if (is_offload(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 hash_base, hash_reg;

			if (chip <= CHELSIO_T5) {
				hash_reg = LE_DB_TID_HASHBASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base / 4;
			} else {
				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base;
			}
		}
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0) {
		adapter->flags |= USING_MSI;
		if (msi > 1)
			free_msix_info(adapter);
	}

	/* check for PCI Express bandwidth capabilities */
	cxgb4_check_pcie_caps(adapter);

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go. If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully. However we must
	 * register at least one net device.
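	 * (Hence the loop below breaks on the first register_netdev()
	 * failure, warns how many netdevs made it, and clears err as long as
	 * at least one port registered.)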
4945 */ 4946 for_each_port(adapter, i) { 4947 pi = adap2pinfo(adapter, i); 4948 adapter->port[i]->dev_port = pi->lport; 4949 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets); 4950 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets); 4951 4952 err = register_netdev(adapter->port[i]); 4953 if (err) 4954 break; 4955 adapter->chan_map[pi->tx_chan] = i; 4956 print_port_info(adapter->port[i]); 4957 } 4958 if (i == 0) { 4959 dev_err(&pdev->dev, "could not register any net devices\n"); 4960 goto out_free_dev; 4961 } 4962 if (err) { 4963 dev_warn(&pdev->dev, "only %d net devices registered\n", i); 4964 err = 0; 4965 } 4966 4967 if (cxgb4_debugfs_root) { 4968 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), 4969 cxgb4_debugfs_root); 4970 setup_debugfs(adapter); 4971 } 4972 4973 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 4974 pdev->needs_freset = 1; 4975 4976 if (is_uld(adapter)) { 4977 mutex_lock(&uld_mutex); 4978 list_add_tail(&adapter->list_node, &adapter_list); 4979 mutex_unlock(&uld_mutex); 4980 } 4981 4982 print_adapter_info(adapter); 4983 setup_fw_sge_queues(adapter); 4984 return 0; 4985 4986 sriov: 4987 #ifdef CONFIG_PCI_IOV 4988 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 4989 if (!adapter) { 4990 err = -ENOMEM; 4991 goto free_pci_region; 4992 } 4993 4994 adapter->pdev = pdev; 4995 adapter->pdev_dev = &pdev->dev; 4996 adapter->name = pci_name(pdev); 4997 adapter->mbox = func; 4998 adapter->pf = func; 4999 adapter->regs = regs; 5000 adapter->adap_idx = adap_idx; 5001 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + 5002 (sizeof(struct mbox_cmd) * 5003 T4_OS_LOG_MBOX_CMDS), 5004 GFP_KERNEL); 5005 if (!adapter->mbox_log) { 5006 err = -ENOMEM; 5007 goto free_adapter; 5008 } 5009 spin_lock_init(&adapter->mbox_lock); 5010 INIT_LIST_HEAD(&adapter->mlist.list); 5011 5012 v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | 5013 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); 5014 err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1, 5015 &v, &port_vec); 5016 if (err < 0) { 5017 dev_err(adapter->pdev_dev, "Could not fetch port params\n"); 5018 goto free_adapter; 5019 } 5020 5021 adapter->params.nports = hweight32(port_vec); 5022 pci_set_drvdata(pdev, adapter); 5023 return 0; 5024 5025 free_adapter: 5026 kfree(adapter); 5027 free_pci_region: 5028 iounmap(regs); 5029 pci_disable_sriov(pdev); 5030 pci_release_regions(pdev); 5031 return err; 5032 #else 5033 return 0; 5034 #endif 5035 5036 out_free_dev: 5037 free_some_resources(adapter); 5038 if (adapter->flags & USING_MSIX) 5039 free_msix_info(adapter); 5040 if (adapter->num_uld || adapter->num_ofld_uld) 5041 t4_uld_mem_free(adapter); 5042 out_unmap_bar: 5043 if (!is_t4(adapter->params.chip)) 5044 iounmap(adapter->bar2); 5045 out_free_adapter: 5046 if (adapter->workq) 5047 destroy_workqueue(adapter->workq); 5048 5049 kfree(adapter->mbox_log); 5050 kfree(adapter); 5051 out_unmap_bar0: 5052 iounmap(regs); 5053 out_disable_device: 5054 pci_disable_pcie_error_reporting(pdev); 5055 pci_disable_device(pdev); 5056 out_release_regions: 5057 pci_release_regions(pdev); 5058 return err; 5059 } 5060 5061 static void remove_one(struct pci_dev *pdev) 5062 { 5063 struct adapter *adapter = pci_get_drvdata(pdev); 5064 5065 if (!adapter) { 5066 pci_release_regions(pdev); 5067 return; 5068 } 5069 5070 if (adapter->pf == 4) { 5071 int i; 5072 5073 /* Tear down per-adapter Work Queue first since it can contain 5074 * references to our adapter data structure. 
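		 *
		 * (destroy_workqueue() drains any pending tid-release and
		 * doorbell recovery work before returning, so no work item
		 * can run against a freed adapter.)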
		 */
		destroy_workqueue(adapter->workq);

		if (is_uld(adapter))
			detach_ulds(adapter);

		disable_interrupts(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		clear_all_filters(adapter);

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		if (adapter->flags & USING_MSIX)
			free_msix_info(adapter);
		if (adapter->num_uld || adapter->num_ofld_uld)
			t4_uld_mem_free(adapter);
		free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		kfree(adapter->mbox_log);
		synchronize_rcu();
		kfree(adapter);
	}
#ifdef CONFIG_PCI_IOV
	else {
		if (adapter->port[0])
			unregister_netdev(adapter->port[0]);
		iounmap(adapter->regs);
		kfree(adapter->vfinfo);
		kfree(adapter);
		pci_disable_sriov(pdev);
		pci_release_regions(pdev);
	}
#endif
}

/* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
 * delivery. This is essentially a stripped-down version of the PCI remove()
 * function where we do the minimal amount of work necessary to shut down any
 * further activity.
 */
static void shutdown_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	/* As with remove_one() above (see extended comment), we only want to
	 * do cleanup on PCI Devices which went all the way through init_one()
	 * ...
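	 *
	 * (A NULL drvdata below means probe bailed out early, in which case
	 * only the PCI regions need releasing.)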
5143 */ 5144 if (!adapter) { 5145 pci_release_regions(pdev); 5146 return; 5147 } 5148 5149 if (adapter->pf == 4) { 5150 int i; 5151 5152 for_each_port(adapter, i) 5153 if (adapter->port[i]->reg_state == NETREG_REGISTERED) 5154 cxgb_close(adapter->port[i]); 5155 5156 t4_uld_clean_up(adapter); 5157 disable_interrupts(adapter); 5158 disable_msi(adapter); 5159 5160 t4_sge_stop(adapter); 5161 if (adapter->flags & FW_OK) 5162 t4_fw_bye(adapter, adapter->mbox); 5163 } 5164 #ifdef CONFIG_PCI_IOV 5165 else { 5166 if (adapter->port[0]) 5167 unregister_netdev(adapter->port[0]); 5168 iounmap(adapter->regs); 5169 kfree(adapter->vfinfo); 5170 kfree(adapter); 5171 pci_disable_sriov(pdev); 5172 pci_release_regions(pdev); 5173 } 5174 #endif 5175 } 5176 5177 static struct pci_driver cxgb4_driver = { 5178 .name = KBUILD_MODNAME, 5179 .id_table = cxgb4_pci_tbl, 5180 .probe = init_one, 5181 .remove = remove_one, 5182 .shutdown = shutdown_one, 5183 #ifdef CONFIG_PCI_IOV 5184 .sriov_configure = cxgb4_iov_configure, 5185 #endif 5186 .err_handler = &cxgb4_eeh, 5187 }; 5188 5189 static int __init cxgb4_init_module(void) 5190 { 5191 int ret; 5192 5193 /* Debugfs support is optional, just warn if this fails */ 5194 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); 5195 if (!cxgb4_debugfs_root) 5196 pr_warn("could not create debugfs entry, continuing\n"); 5197 5198 ret = pci_register_driver(&cxgb4_driver); 5199 if (ret < 0) 5200 debugfs_remove(cxgb4_debugfs_root); 5201 5202 #if IS_ENABLED(CONFIG_IPV6) 5203 if (!inet6addr_registered) { 5204 register_inet6addr_notifier(&cxgb4_inet6addr_notifier); 5205 inet6addr_registered = true; 5206 } 5207 #endif 5208 5209 return ret; 5210 } 5211 5212 static void __exit cxgb4_cleanup_module(void) 5213 { 5214 #if IS_ENABLED(CONFIG_IPV6) 5215 if (inet6addr_registered) { 5216 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier); 5217 inet6addr_registered = false; 5218 } 5219 #endif 5220 pci_unregister_driver(&cxgb4_driver); 5221 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ 5222 } 5223 5224 module_init(cxgb4_init_module); 5225 module_exit(cxgb4_cleanup_module); 5226