/*************************************************************************
 * myri10ge.c: Myricom Myri-10G Ethernet driver.
 *
 * Copyright (C) 2005 - 2011 Myricom, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Myricom, Inc. nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * If the eeprom on your board is not recent enough, you will need to get a
 * newer firmware image at:
 *   http://www.myri.com/scs/download-Myri10GE.html
 *
 * Contact Information:
 *   <help@myri.com>
 *   Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006
 *************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/dca.h>
#include <linux/ip.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/processor.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif

#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"

#define MYRI10GE_VERSION_STR "1.5.3-1.534"

MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
MODULE_VERSION(MYRI10GE_VERSION_STR);
MODULE_LICENSE("Dual BSD/GPL");

#define MYRI10GE_MAX_ETHER_MTU 9014

#define MYRI10GE_ETH_STOPPED 0
#define MYRI10GE_ETH_STOPPING 1
#define MYRI10GE_ETH_STARTING 2
#define MYRI10GE_ETH_RUNNING 3
#define MYRI10GE_ETH_OPEN_FAILED 4

#define MYRI10GE_EEPROM_STRINGS_SIZE 256
#define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)

#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff

#define MYRI10GE_ALLOC_ORDER 0
#define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
#define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1)
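/*
 * Sizing note (added): with MYRI10GE_ALLOC_ORDER == 0 and a 4 KiB
 * PAGE_SIZE, MYRI10GE_ALLOC_SIZE is 4096, so a maximal 9014-byte frame
 * spans 9014/4096 + 1 == 3 page fragments.
 */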
#define MYRI10GE_MAX_SLICES 32

struct myri10ge_rx_buffer_state {
	struct page *page;
	int page_offset;
	DEFINE_DMA_UNMAP_ADDR(bus);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct myri10ge_tx_buffer_state {
	struct sk_buff *skb;
	int last;
	DEFINE_DMA_UNMAP_ADDR(bus);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct myri10ge_cmd {
	u32 data0;
	u32 data1;
	u32 data2;
};

struct myri10ge_rx_buf {
	struct mcp_kreq_ether_recv __iomem *lanai;	/* lanai ptr for recv ring */
	struct mcp_kreq_ether_recv *shadow;	/* host shadow of recv ring */
	struct myri10ge_rx_buffer_state *info;
	struct page *page;
	dma_addr_t bus;
	int page_offset;
	int cnt;
	int fill_cnt;
	int alloc_fail;
	int mask;		/* number of rx slots -1 */
	int watchdog_needed;
};

struct myri10ge_tx_buf {
	struct mcp_kreq_ether_send __iomem *lanai;	/* lanai ptr for sendq */
	__be32 __iomem *send_go;	/* "go" doorbell ptr */
	__be32 __iomem *send_stop;	/* "stop" doorbell ptr */
	struct mcp_kreq_ether_send *req_list;	/* host shadow of sendq */
	char *req_bytes;
	struct myri10ge_tx_buffer_state *info;
	int mask;		/* number of transmit slots -1 */
	int req ____cacheline_aligned;	/* transmit slots submitted */
	int pkt_start;		/* packets started */
	int stop_queue;
	int linearized;
	int done ____cacheline_aligned;	/* transmit slots completed */
	int pkt_done;		/* packets completed */
	int wake_queue;
	int queue_active;
};
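/*
 * Layout note (added): "req" and the fields following it are written by
 * the transmit path, while "done" and the fields following it are
 * written by the completion path.  Each group starts on its own cache
 * line (____cacheline_aligned) so the two hot paths do not ping-pong a
 * single cache line between CPUs.
 */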
struct myri10ge_rx_done {
	struct mcp_slot *entry;
	dma_addr_t bus;
	int cnt;
	int idx;
};

struct myri10ge_slice_netstats {
	unsigned long rx_packets;
	unsigned long tx_packets;
	unsigned long rx_bytes;
	unsigned long tx_bytes;
	unsigned long rx_dropped;
	unsigned long tx_dropped;
};

struct myri10ge_slice_state {
	struct myri10ge_tx_buf tx;	/* transmit ring */
	struct myri10ge_rx_buf rx_small;
	struct myri10ge_rx_buf rx_big;
	struct myri10ge_rx_done rx_done;
	struct net_device *dev;
	struct napi_struct napi;
	struct myri10ge_priv *mgp;
	struct myri10ge_slice_netstats stats;
	__be32 __iomem *irq_claim;
	struct mcp_irq_data *fw_stats;
	dma_addr_t fw_stats_bus;
	int watchdog_tx_done;
	int watchdog_tx_req;
	int watchdog_rx_done;
	int stuck;
#ifdef CONFIG_MYRI10GE_DCA
	int cached_dca_tag;
	int cpu;
	__be32 __iomem *dca_tag;
#endif
	char irq_desc[32];
};

struct myri10ge_priv {
	struct myri10ge_slice_state *ss;
	int tx_boundary;	/* boundary transmits cannot cross */
	int num_slices;
	int running;		/* running? */
	int small_bytes;
	int big_bytes;
	int max_intr_slots;
	struct net_device *dev;
	u8 __iomem *sram;
	int sram_size;
	unsigned long board_span;
	unsigned long iomem_base;
	__be32 __iomem *irq_deassert;
	char *mac_addr_string;
	struct mcp_cmd_response *cmd;
	dma_addr_t cmd_bus;
	struct pci_dev *pdev;
	int msi_enabled;
	int msix_enabled;
	struct msix_entry *msix_vectors;
#ifdef CONFIG_MYRI10GE_DCA
	int dca_enabled;
	int relaxed_order;
#endif
	u32 link_state;
	unsigned int rdma_tags_available;
	int intr_coal_delay;
	__be32 __iomem *intr_coal_delay_ptr;
	int mtrr;
	int wc_enabled;
	int down_cnt;
	wait_queue_head_t down_wq;
	struct work_struct watchdog_work;
	struct timer_list watchdog_timer;
	int watchdog_resets;
	int watchdog_pause;
	int pause;
	bool fw_name_allocated;
	char *fw_name;
	char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
	char *product_code_string;
	char fw_version[128];
	int fw_ver_major;
	int fw_ver_minor;
	int fw_ver_tiny;
	int adopted_rx_filter_bug;
	u8 mac_addr[6];		/* eeprom mac address */
	unsigned long serial_number;
	int vendor_specific_offset;
	int fw_multicast_support;
	u32 features;
	u32 max_tso6;
	u32 read_dma;
	u32 write_dma;
	u32 read_write_dma;
	u32 link_changes;
	u32 msg_enable;
	unsigned int board_number;
	int rebooted;
};

static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat";
static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat";
MODULE_FIRMWARE("myri10ge_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");

/* Careful: must be accessed under kparam_block_sysfs_write */
static char *myri10ge_fw_name = NULL;
module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");

#define MYRI10GE_MAX_BOARDS 8
static char *myri10ge_fw_names[MYRI10GE_MAX_BOARDS] =
    {[0 ... (MYRI10GE_MAX_BOARDS - 1)] = NULL };
module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL,
			 0444);
MODULE_PARM_DESC(myri10ge_fw_names, "Firmware image names per board");

static int myri10ge_ecrc_enable = 1;
module_param(myri10ge_ecrc_enable, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");

static int myri10ge_small_bytes = -1;	/* -1 == auto */
module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets");

static int myri10ge_msi = 1;	/* enable msi by default */
module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts");

static int myri10ge_intr_coal_delay = 75;
module_param(myri10ge_intr_coal_delay, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay");

static int myri10ge_flow_control = 1;
module_param(myri10ge_flow_control, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter");

static int myri10ge_deassert_wait = 1;
module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_deassert_wait,
		 "Wait when deasserting legacy interrupts");

static int myri10ge_force_firmware = 0;
module_param(myri10ge_force_firmware, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_force_firmware,
		 "Force firmware to assume aligned completions");

static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
module_param(myri10ge_initial_mtu, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU");

static int myri10ge_napi_weight = 64;
module_param(myri10ge_napi_weight, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight");

static int myri10ge_watchdog_timeout = 1;
module_param(myri10ge_watchdog_timeout, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout");

static int myri10ge_max_irq_loops = 1048576;
module_param(myri10ge_max_irq_loops, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_max_irq_loops,
		 "Set stuck legacy IRQ detection threshold");

#define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK

static int myri10ge_debug = -1;	/* defaults above */
module_param(myri10ge_debug, int, 0);
MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");

static int myri10ge_fill_thresh = 256;
module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");

static int myri10ge_reset_recover = 1;

static int myri10ge_max_slices = 1;
module_param(myri10ge_max_slices, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");

static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
module_param(myri10ge_rss_hash, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do");

static int myri10ge_dca = 1;
module_param(myri10ge_dca, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_dca, "Enable DCA if possible");

#define MYRI10GE_FW_OFFSET 1024*1024
#define MYRI10GE_HIGHPART_TO_U32(X) \
(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
#define MYRI10GE_LOWPART_TO_U32(X) ((u32)(X))

#define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
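/*
 * Example (added): a 64-bit bus address 0x0000000123456780 is handed to
 * the firmware as high word 0x00000001 and low word 0x23456780.  When
 * dma_addr_t is 32 bits, sizeof(X) != 8 and MYRI10GE_HIGHPART_TO_U32()
 * evaluates to the constant 0.
 */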
static void myri10ge_set_multicast_list(struct net_device *dev);
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
				   struct net_device *dev);

static inline void put_be32(__be32 val, __be32 __iomem * p)
{
	__raw_writel((__force __u32) val, (__force void __iomem *)p);
}

static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
						    struct rtnl_link_stats64 *stats);

static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
{
	if (mgp->fw_name_allocated)
		kfree(mgp->fw_name);
	mgp->fw_name = name;
	mgp->fw_name_allocated = allocated;
}
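/*
 * Added overview: commands are issued by PIO-copying a struct mcp_cmd
 * into the firmware's command window (mgp->sram + MXGEFW_ETH_CMD); the
 * firmware then DMAs a struct mcp_cmd_response back to host memory at
 * mgp->cmd_bus, which the function below polls.  A typical non-atomic
 * caller looks like:
 *
 *	struct myri10ge_cmd cmd;
 *	cmd.data0 = value;
 *	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_FOO, &cmd, 0);
 *
 * where MXGEFW_CMD_FOO is a placeholder for any of the real command
 * codes used elsewhere in this file.
 */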
static int
myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
		  struct myri10ge_cmd *data, int atomic)
{
	struct mcp_cmd *buf;
	char buf_bytes[sizeof(*buf) + 8];
	struct mcp_cmd_response *response = mgp->cmd;
	char __iomem *cmd_addr = mgp->sram + MXGEFW_ETH_CMD;
	u32 dma_low, dma_high, result, value;
	int sleep_total = 0;

	/* ensure buf is aligned to 8 bytes */
	buf = (struct mcp_cmd *)ALIGN((unsigned long)buf_bytes, 8);

	buf->data0 = htonl(data->data0);
	buf->data1 = htonl(data->data1);
	buf->data2 = htonl(data->data2);
	buf->cmd = htonl(cmd);
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf->response_addr.low = htonl(dma_low);
	buf->response_addr.high = htonl(dma_high);
	response->result = htonl(MYRI10GE_NO_RESPONSE_RESULT);
	mb();
	myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf));

	/* wait up to 15ms. Longest command is the DMA benchmark,
	 * which is capped at 5ms, but runs from a timeout handler
	 * that runs every 7.8ms. So a 15ms timeout leaves us with
	 * a 2.2ms margin
	 */
	if (atomic) {
		/* if atomic is set, do not sleep,
		 * and try to get the completion quickly
		 * (1ms will be enough for those commands) */
		for (sleep_total = 0;
		     sleep_total < 1000 &&
		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
		     sleep_total += 10) {
			udelay(10);
			mb();
		}
	} else {
		/* use msleep for most commands */
		for (sleep_total = 0;
		     sleep_total < 15 &&
		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
		     sleep_total++)
			msleep(1);
	}

	result = ntohl(response->result);
	value = ntohl(response->data);
	if (result != MYRI10GE_NO_RESPONSE_RESULT) {
		if (result == 0) {
			data->data0 = value;
			return 0;
		} else if (result == MXGEFW_CMD_UNKNOWN) {
			return -ENOSYS;
		} else if (result == MXGEFW_CMD_ERROR_UNALIGNED) {
			return -E2BIG;
		} else if (result == MXGEFW_CMD_ERROR_RANGE &&
			   cmd == MXGEFW_CMD_ENABLE_RSS_QUEUES &&
			   (data->
			    data1 & MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES) !=
			   0) {
			return -ERANGE;
		} else {
			dev_err(&mgp->pdev->dev,
				"command %d failed, result = %d\n",
				cmd, result);
			return -ENXIO;
		}
	}

	dev_err(&mgp->pdev->dev, "command %d timed out, result = %d\n",
		cmd, result);
	return -EAGAIN;
}

/*
 * The eeprom strings on the lanaiX have the format
 * SN=x\0
 * MAC=x:x:x:x:x:x\0
 * PT:ddd mmm xx xx:xx:xx xx\0
 * PV:ddd mmm xx xx:xx:xx xx\0
 */
static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
{
	char *ptr, *limit;
	int i;

	ptr = mgp->eeprom_strings;
	limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE;

	while (*ptr != '\0' && ptr < limit) {
		if (memcmp(ptr, "MAC=", 4) == 0) {
			ptr += 4;
			mgp->mac_addr_string = ptr;
			for (i = 0; i < 6; i++) {
				if ((ptr + 2) > limit)
					goto abort;
				mgp->mac_addr[i] =
				    simple_strtoul(ptr, &ptr, 16);
				ptr += 1;
			}
		}
		if (memcmp(ptr, "PC=", 3) == 0) {
			ptr += 3;
			mgp->product_code_string = ptr;
		}
		if (memcmp((const void *)ptr, "SN=", 3) == 0) {
			ptr += 3;
			mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
		}
		while (ptr < limit && *ptr++) ;
	}

	return 0;

abort:
	dev_err(&mgp->pdev->dev, "failed to parse eeprom_strings\n");
	return -ENXIO;
}
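/*
 * Parsing example (added, with a made-up MAC and serial number): given
 * eeprom_strings containing "MAC=00:60:dd:47:ab:cd\0SN=123456\0", the
 * loop above leaves mac_addr[] = {0x00, 0x60, 0xdd, 0x47, 0xab, 0xcd}
 * and serial_number = 123456.  simple_strtoul() advances ptr past each
 * hex byte and the "ptr += 1" steps over the ':' separator (or the
 * terminating '\0' on the last byte).
 */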
/*
 * Enable or disable periodic RDMAs from the host to make certain
 * chipsets resend dropped PCIe messages
 */

static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
{
	char __iomem *submit;
	__be32 buf[16] __attribute__ ((__aligned__(8)));
	u32 dma_low, dma_high;
	int i;

	/* clear confirmation addr */
	mgp->cmd->data = 0;
	mb();

	/* send a rdma command to the PCIe engine, and wait for the
	 * response in the confirmation address.  The firmware should
	 * write a -1 there to indicate it is alive and well
	 */
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf[0] = htonl(dma_high);	/* confirm addr MSW */
	buf[1] = htonl(dma_low);	/* confirm addr LSW */
	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */
	buf[3] = htonl(dma_high);	/* dummy addr MSW */
	buf[4] = htonl(dma_low);	/* dummy addr LSW */
	buf[5] = htonl(enable);	/* enable? */

	submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA;

	myri10ge_pio_copy(submit, &buf, sizeof(buf));
	for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
		msleep(1);
	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA)
		dev_err(&mgp->pdev->dev, "dummy rdma %s failed\n",
			(enable ? "enable" : "disable"));
}

static int
myri10ge_validate_firmware(struct myri10ge_priv *mgp,
			   struct mcp_gen_header *hdr)
{
	struct device *dev = &mgp->pdev->dev;

	/* check firmware type */
	if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
		dev_err(dev, "Bad firmware type: 0x%x\n", ntohl(hdr->mcp_type));
		return -EINVAL;
	}

	/* save firmware version for ethtool */
	strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));

	sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major,
	       &mgp->fw_ver_minor, &mgp->fw_ver_tiny);

	if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR &&
	      mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
		dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
		dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
			MXGEFW_VERSION_MINOR);
		return -EINVAL;
	}
	return 0;
}

static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
{
	unsigned crc, reread_crc;
	const struct firmware *fw;
	struct device *dev = &mgp->pdev->dev;
	unsigned char *fw_readback;
	struct mcp_gen_header *hdr;
	size_t hdr_offset;
	int status;
	unsigned i;

	if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
		dev_err(dev, "Unable to load %s firmware image via hotplug\n",
			mgp->fw_name);
		status = -EINVAL;
		goto abort_with_nothing;
	}

	/* check size */
	if (fw->size >= mgp->sram_size - MYRI10GE_FW_OFFSET ||
	    fw->size < MCP_HEADER_PTR_OFFSET + 4) {
		dev_err(dev, "Firmware size invalid:%d\n", (int)fw->size);
		status = -EINVAL;
		goto abort_with_fw;
	}

	/* check id */
	hdr_offset = ntohl(*(__be32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) {
		dev_err(dev, "Bad firmware file\n");
		status = -EINVAL;
		goto abort_with_fw;
	}
	hdr = (void *)(fw->data + hdr_offset);

	status = myri10ge_validate_firmware(mgp, hdr);
	if (status != 0)
		goto abort_with_fw;

	crc = crc32(~0, fw->data, fw->size);
	for (i = 0; i < fw->size; i += 256) {
		myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
				  fw->data + i,
				  min(256U, (unsigned)(fw->size - i)));
		mb();
		readb(mgp->sram);
	}
	fw_readback = vmalloc(fw->size);
	if (!fw_readback) {
		status = -ENOMEM;
		goto abort_with_fw;
	}
	/* corruption checking is good for parity recovery and buggy chipset */
	memcpy_fromio(fw_readback, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
	reread_crc = crc32(~0, fw_readback, fw->size);
	vfree(fw_readback);
	if (crc != reread_crc) {
		dev_err(dev, "CRC failed(fw-len=%u), got 0x%x (expect 0x%x)\n",
			(unsigned)fw->size, reread_crc, crc);
		status = -EIO;
		goto abort_with_fw;
	}
	*size = (u32) fw->size;

abort_with_fw:
	release_firmware(fw);

abort_with_nothing:
	return status;
}
static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
{
	struct mcp_gen_header *hdr;
	struct device *dev = &mgp->pdev->dev;
	const size_t bytes = sizeof(struct mcp_gen_header);
	size_t hdr_offset;
	int status;

	/* find running firmware header */
	hdr_offset = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));

	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > mgp->sram_size) {
		dev_err(dev, "Running firmware has bad header offset (%d)\n",
			(int)hdr_offset);
		return -EIO;
	}

	/* copy header of running firmware from SRAM to host memory to
	 * validate firmware */
	hdr = kmalloc(bytes, GFP_KERNEL);
	if (hdr == NULL)
		return -ENOMEM;

	memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
	status = myri10ge_validate_firmware(mgp, hdr);
	kfree(hdr);

	/* check to see if adopted firmware has bug where adopting
	 * it will cause broadcasts to be filtered unless the NIC
	 * is kept in ALLMULTI mode */
	if (mgp->fw_ver_major == 1 && mgp->fw_ver_minor == 4 &&
	    mgp->fw_ver_tiny >= 4 && mgp->fw_ver_tiny <= 11) {
		mgp->adopted_rx_filter_bug = 1;
		dev_warn(dev, "Adopting fw %d.%d.%d: "
			 "working around rx filter bug\n",
			 mgp->fw_ver_major, mgp->fw_ver_minor,
			 mgp->fw_ver_tiny);
	}
	return status;
}

static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	int status;

	/* probe for IPv6 TSO support */
	mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
				   &cmd, 0);
	if (status == 0) {
		mgp->max_tso6 = cmd.data0;
		mgp->features |= NETIF_F_TSO6;
	}

	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev,
			"failed MXGEFW_CMD_GET_RX_RING_SIZE\n");
		return -ENXIO;
	}

	mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr));

	return 0;
}
static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
{
	char __iomem *submit;
	__be32 buf[16] __attribute__ ((__aligned__(8)));
	u32 dma_low, dma_high, size;
	int status, i;

	size = 0;
	status = myri10ge_load_hotplug_firmware(mgp, &size);
	if (status) {
		if (!adopt)
			return status;
		dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");

		/* Do not attempt to adopt firmware if there
		 * was a bad crc */
		if (status == -EIO)
			return status;

		status = myri10ge_adopt_running_firmware(mgp);
		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to adopt running firmware\n");
			return status;
		}
		dev_info(&mgp->pdev->dev,
			 "Successfully adopted running firmware\n");
		if (mgp->tx_boundary == 4096) {
			dev_warn(&mgp->pdev->dev,
				 "Using firmware currently running on NIC"
				 ". For optimal\n");
			dev_warn(&mgp->pdev->dev,
				 "performance consider loading optimized "
				 "firmware\n");
			dev_warn(&mgp->pdev->dev, "via hotplug\n");
		}

		set_fw_name(mgp, "adopted", false);
		mgp->tx_boundary = 2048;
		myri10ge_dummy_rdma(mgp, 1);
		status = myri10ge_get_firmware_capabilities(mgp);
		return status;
	}

	/* clear confirmation addr */
	mgp->cmd->data = 0;
	mb();

	/* send a reload command to the bootstrap MCP, and wait for the
	 * response in the confirmation address.  The firmware should
	 * write a -1 there to indicate it is alive and well
	 */
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf[0] = htonl(dma_high);	/* confirm addr MSW */
	buf[1] = htonl(dma_low);	/* confirm addr LSW */
	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */

	/* FIX: All newest firmware should un-protect the bottom of
	 * the sram before handoff. However, the very first interfaces
	 * do not. Therefore the handoff copy must skip the first 8 bytes
	 */
	buf[3] = htonl(MYRI10GE_FW_OFFSET + 8);	/* where the code starts */
	buf[4] = htonl(size - 8);	/* length of code */
	buf[5] = htonl(8);	/* where to copy to */
	buf[6] = htonl(0);	/* where to jump to */

	submit = mgp->sram + MXGEFW_BOOT_HANDOFF;

	myri10ge_pio_copy(submit, &buf, sizeof(buf));
	mb();
	msleep(1);
	mb();
	i = 0;
	while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) {
		msleep(1 << i);
		i++;
	}
	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
		dev_err(&mgp->pdev->dev, "handoff failed\n");
		return -ENXIO;
	}
	myri10ge_dummy_rdma(mgp, 1);
	status = myri10ge_get_firmware_capabilities(mgp);

	return status;
}

static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
{
	struct myri10ge_cmd cmd;
	int status;

	cmd.data0 = ((addr[0] << 24) | (addr[1] << 16)
		     | (addr[2] << 8) | addr[3]);

	cmd.data1 = ((addr[4] << 8) | (addr[5]));

	status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0);
	return status;
}

static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
{
	struct myri10ge_cmd cmd;
	int status, ctl;

	ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL;
	status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);

	if (status) {
		netdev_err(mgp->dev, "Failed to set flow control mode\n");
		return status;
	}
	mgp->pause = pause;
	return 0;
}

static void
myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
{
	struct myri10ge_cmd cmd;
	int status, ctl;

	ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
	status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
	if (status)
		netdev_err(mgp->dev, "Failed to set promisc mode\n");
}

static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
{
	struct myri10ge_cmd cmd;
	int status;
	u32 len;
	struct page *dmatest_page;
	dma_addr_t dmatest_bus;
	char *test = " ";

	dmatest_page = alloc_page(GFP_KERNEL);
	if (!dmatest_page)
		return -ENOMEM;
	dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
				   DMA_BIDIRECTIONAL);

	/* Run a small DMA test.
	 * The magic multipliers to the length tell the firmware
	 * to do DMA read, write, or read+write tests.  The
	 * results are returned in cmd.data0.  The upper 16
	 * bits of the return is the number of transfers completed.
	 * The lower 16 bits is the time in 0.5us ticks that the
	 * transfers took to complete.
	 */
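/*
 * Worked example (added, with made-up numbers): if the read test
 * returns cmd.data0 == 0x01400190, then 0x0140 == 320 transfers
 * completed in 0x0190 == 400 half-microsecond ticks (200 us).  With
 * len == 2048 that is (320 * 2048 * 2) / 400 == 3276 MB/s, which is
 * what ethtool later reports as "read_dma_bw_MBs".
 */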
	len = mgp->tx_boundary;

	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10000;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "read";
		goto abort;
	}
	mgp->read_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);
	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x1;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "write";
		goto abort;
	}
	mgp->write_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);

	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10001;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "read/write";
		goto abort;
	}
	mgp->read_write_dma = ((cmd.data0 >> 16) * len * 2 * 2) /
	    (cmd.data0 & 0xffff);

abort:
	pci_unmap_page(mgp->pdev, dmatest_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
	put_page(dmatest_page);

	if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST)
		dev_warn(&mgp->pdev->dev, "DMA %s benchmark failed: %d\n",
			 test, status);

	return status;
}
static int myri10ge_reset(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	struct myri10ge_slice_state *ss;
	int i, status;
	size_t bytes;
#ifdef CONFIG_MYRI10GE_DCA
	unsigned long dca_tag_off;
#endif

	/* try to send a reset command to the card to see if it
	 * is alive */
	memset(&cmd, 0, sizeof(cmd));
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed reset\n");
		return -ENXIO;
	}

	(void)myri10ge_dma_test(mgp, MXGEFW_DMA_TEST);
	/*
	 * Use non-ndis mcp_slot (e.g., 4 bytes total,
	 * no toeplitz hash value returned).  Older firmware will
	 * not understand this command, but will use the correct
	 * sized mcp_slot, so we ignore error returns
	 */
	cmd.data0 = MXGEFW_RSS_MCP_SLOT_TYPE_MIN;
	(void)myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, &cmd, 0);

	/* Now exchange information about interrupts */

	bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry);
	cmd.data0 = (u32) bytes;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);

	/*
	 * Even though we already know how many slices are supported
	 * via myri10ge_probe_slices() MXGEFW_CMD_GET_MAX_RSS_QUEUES
	 * has magic side effects, and must be called after a reset.
	 * It must be called prior to calling any RSS related cmds,
	 * including assigning an interrupt queue for anything but
	 * slice 0.  It must also be called *after*
	 * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
	 * the firmware to compute offsets.
	 */

	if (mgp->num_slices > 1) {

		/* ask the maximum number of slices it supports */
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
					   &cmd, 0);
		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to get number of slices\n");
		}

		/*
		 * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
		 * to setting up the interrupt queue DMA
		 */

		cmd.data0 = mgp->num_slices;
		cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
		if (mgp->dev->real_num_tx_queues > 1)
			cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
					   &cmd, 0);

		/* Firmware older than 1.4.32 only supports multiple
		 * RX queues, so if we get an error, first retry using a
		 * single TX queue before giving up */
		if (status != 0 && mgp->dev->real_num_tx_queues > 1) {
			netif_set_real_num_tx_queues(mgp->dev, 1);
			cmd.data0 = mgp->num_slices;
			cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
			status = myri10ge_send_cmd(mgp,
						   MXGEFW_CMD_ENABLE_RSS_QUEUES,
						   &cmd, 0);
		}

		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to set number of slices\n");

			return status;
		}
	}
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus);
		cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus);
		cmd.data2 = i;
		status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA,
					    &cmd, 0);
	}

	status |=
	    myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		ss->irq_claim =
		    (__iomem __be32 *) (mgp->sram + cmd.data0 + 8 * i);
	}
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
				    &cmd, 0);
	mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);

	status |= myri10ge_send_cmd
	    (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0);
	mgp->intr_coal_delay_ptr = (__iomem __be32 *) (mgp->sram + cmd.data0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n");
		return status;
	}
	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);

#ifdef CONFIG_MYRI10GE_DCA
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
	dca_tag_off = cmd.data0;
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		if (status == 0) {
			ss->dca_tag = (__iomem __be32 *)
			    (mgp->sram + dca_tag_off + 4 * i);
		} else {
			ss->dca_tag = NULL;
		}
	}
#endif				/* CONFIG_MYRI10GE_DCA */

	/* reset mcp/driver shared state back to 0 */

	mgp->link_changes = 0;
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];

		memset(ss->rx_done.entry, 0, bytes);
		ss->tx.req = 0;
		ss->tx.done = 0;
		ss->tx.pkt_start = 0;
		ss->tx.pkt_done = 0;
		ss->rx_big.cnt = 0;
		ss->rx_small.cnt = 0;
		ss->rx_done.idx = 0;
		ss->rx_done.cnt = 0;
		ss->tx.wake_queue = 0;
		ss->tx.stop_queue = 0;
	}

	status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
	myri10ge_change_pause(mgp, mgp->pause);
	myri10ge_set_multicast_list(mgp->dev);
	return status;
}

#ifdef CONFIG_MYRI10GE_DCA
static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
{
	int ret;
	u16 ctl;

	pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &ctl);

	ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
	if (ret != on) {
		ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
		ctl |= (on << 4);
		pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, ctl);
	}
	return ret;
}
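/*
 * Note (added): PCI_EXP_DEVCTL_RELAX_EN is 0x0010, i.e. bit 4 of the
 * PCIe Device Control register, so the ">> 4" above turns the masked
 * bit into a 0/1 value.  The previous setting is returned so that
 * myri10ge_teardown_dca() can restore it.
 */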
static void
myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
{
	ss->cached_dca_tag = tag;
	put_be32(htonl(tag), ss->dca_tag);
}

static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
{
	int cpu = get_cpu();
	int tag;

	if (cpu != ss->cpu) {
		tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
		if (ss->cached_dca_tag != tag)
			myri10ge_write_dca(ss, cpu, tag);
		ss->cpu = cpu;
	}
	put_cpu();
}

static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
{
	int err, i;
	struct pci_dev *pdev = mgp->pdev;

	if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled)
		return;
	if (!myri10ge_dca) {
		dev_err(&pdev->dev, "dca disabled by administrator\n");
		return;
	}
	err = dca_add_requester(&pdev->dev);
	if (err) {
		if (err != -ENODEV)
			dev_err(&pdev->dev,
				"dca_add_requester() failed, err=%d\n", err);
		return;
	}
	mgp->relaxed_order = myri10ge_toggle_relaxed(pdev, 0);
	mgp->dca_enabled = 1;
	for (i = 0; i < mgp->num_slices; i++) {
		mgp->ss[i].cpu = -1;
		mgp->ss[i].cached_dca_tag = -1;
		myri10ge_update_dca(&mgp->ss[i]);
	}
}

static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;

	if (!mgp->dca_enabled)
		return;
	mgp->dca_enabled = 0;
	if (mgp->relaxed_order)
		myri10ge_toggle_relaxed(pdev, 1);
	dca_remove_requester(&pdev->dev);
}

static int myri10ge_notify_dca_device(struct device *dev, void *data)
{
	struct myri10ge_priv *mgp;
	unsigned long event;

	mgp = dev_get_drvdata(dev);
	event = *(unsigned long *)data;

	if (event == DCA_PROVIDER_ADD)
		myri10ge_setup_dca(mgp);
	else if (event == DCA_PROVIDER_REMOVE)
		myri10ge_teardown_dca(mgp);
	return 0;
}
#endif				/* CONFIG_MYRI10GE_DCA */
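/*
 * Added explanation for myri10ge_submit_8rx() below: receive
 * descriptors are handed to the NIC in batches of 8 (64 bytes) so the
 * two 32-byte PIO bursts can be write-combined.  The low address word
 * of the first descriptor is temporarily overwritten with all ones
 * while the bursts are copied, and the real value is written last, so
 * the NIC never treats a partially-written batch as valid.
 */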
static inline void
myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
		    struct mcp_kreq_ether_recv *src)
{
	__be32 low;

	low = src->addr_low;
	src->addr_low = htonl(DMA_BIT_MASK(32));
	myri10ge_pio_copy(dst, src, 4 * sizeof(*src));
	mb();
	myri10ge_pio_copy(dst + 4, src + 4, 4 * sizeof(*src));
	mb();
	src->addr_low = low;
	put_be32(low, &dst->addr_low);
	mb();
}

static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
{
	struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);

	if ((skb->protocol == htons(ETH_P_8021Q)) &&
	    (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
	     vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
		skb->csum = hw_csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}

static void
myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
			int bytes, int watchdog)
{
	struct page *page;
	int idx;
#if MYRI10GE_ALLOC_SIZE > 4096
	int end_offset;
#endif

	if (unlikely(rx->watchdog_needed && !watchdog))
		return;

	/* try to refill entire ring */
	while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
		idx = rx->fill_cnt & rx->mask;
		if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) {
			/* we can use part of previous page */
			get_page(rx->page);
		} else {
			/* we need a new page */
			page =
			    alloc_pages(GFP_ATOMIC | __GFP_COMP,
					MYRI10GE_ALLOC_ORDER);
			if (unlikely(page == NULL)) {
				if (rx->fill_cnt - rx->cnt < 16)
					rx->watchdog_needed = 1;
				return;
			}
			rx->page = page;
			rx->page_offset = 0;
			rx->bus = pci_map_page(mgp->pdev, page, 0,
					       MYRI10GE_ALLOC_SIZE,
					       PCI_DMA_FROMDEVICE);
		}
		rx->info[idx].page = rx->page;
		rx->info[idx].page_offset = rx->page_offset;
		/* note that this is the address of the start of the
		 * page */
		dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
		rx->shadow[idx].addr_low =
		    htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
		rx->shadow[idx].addr_high =
		    htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus));

		/* start next packet on a cacheline boundary */
		rx->page_offset += SKB_DATA_ALIGN(bytes);

#if MYRI10GE_ALLOC_SIZE > 4096
		/* don't cross a 4KB boundary */
		end_offset = rx->page_offset + bytes - 1;
		if ((unsigned)(rx->page_offset ^ end_offset) > 4095)
			rx->page_offset = end_offset & ~4095;
#endif
		rx->fill_cnt++;

		/* copy 8 descriptors to the firmware at a time */
		if ((idx & 7) == 7) {
			myri10ge_submit_8rx(&rx->lanai[idx - 7],
					    &rx->shadow[idx - 7]);
		}
	}
}
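/*
 * Ring-accounting note (added): fill_cnt counts buffers posted and cnt
 * counts buffers consumed, so the ring is full when
 * fill_cnt == cnt + mask + 1.  If an atomic page allocation fails with
 * fewer than 16 posted buffers remaining, watchdog_needed is set and
 * the watchdog timer retries the refill later with watchdog == 1.
 */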
static inline void
myri10ge_unmap_rx_page(struct pci_dev *pdev,
		       struct myri10ge_rx_buffer_state *info, int bytes)
{
	/* unmap the recvd page if we're the only or last user of it */
	if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
	    (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
		pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
				      & ~(MYRI10GE_ALLOC_SIZE - 1)),
			       MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
	}
}

/*
 * GRO does not support acceleration of tagged vlan frames, and
 * this NIC does not support vlan tag offload, so we must pop
 * the tag ourselves to be able to achieve GRO performance that
 * is comparable to LRO.
 */
static inline void
myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
{
	u8 *va;
	struct vlan_ethhdr *veh;
	struct skb_frag_struct *frag;
	__wsum vsum;

	va = addr;
	va += MXGEFW_PAD;
	veh = (struct vlan_ethhdr *)va;
	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
	    NETIF_F_HW_VLAN_CTAG_RX &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q)) {
		/* fixup csum if needed */
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			vsum = csum_partial(va + ETH_HLEN, VLAN_HLEN, 0);
			skb->csum = csum_sub(skb->csum, vsum);
		}
		/* pop tag */
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       ntohs(veh->h_vlan_TCI));
		memmove(va + VLAN_HLEN, va, 2 * ETH_ALEN);
		skb->len -= VLAN_HLEN;
		skb->data_len -= VLAN_HLEN;
		frag = skb_shinfo(skb)->frags;
		frag->page_offset += VLAN_HLEN;
		skb_frag_size_set(frag, skb_frag_size(frag) - VLAN_HLEN);
	}
}

static inline int
myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
{
	struct myri10ge_priv *mgp = ss->mgp;
	struct sk_buff *skb;
	struct skb_frag_struct *rx_frags;
	struct myri10ge_rx_buf *rx;
	int i, idx, remainder, bytes;
	struct pci_dev *pdev = mgp->pdev;
	struct net_device *dev = mgp->dev;
	u8 *va;

	if (len <= mgp->small_bytes) {
		rx = &ss->rx_small;
		bytes = mgp->small_bytes;
	} else {
		rx = &ss->rx_big;
		bytes = mgp->big_bytes;
	}

	len += MXGEFW_PAD;
	idx = rx->cnt & rx->mask;
	va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
	prefetch(va);

	skb = napi_get_frags(&ss->napi);
	if (unlikely(skb == NULL)) {
		ss->stats.rx_dropped++;
		for (i = 0, remainder = len; remainder > 0; i++) {
			myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
			put_page(rx->info[idx].page);
			rx->cnt++;
			idx = rx->cnt & rx->mask;
			remainder -= MYRI10GE_ALLOC_SIZE;
		}
		return 0;
	}
	rx_frags = skb_shinfo(skb)->frags;
	/* Fill skb_frag_struct(s) with data from our receive */
	for (i = 0, remainder = len; remainder > 0; i++) {
		myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
		skb_fill_page_desc(skb, i, rx->info[idx].page,
				   rx->info[idx].page_offset,
				   remainder < MYRI10GE_ALLOC_SIZE ?
				   remainder : MYRI10GE_ALLOC_SIZE);
		rx->cnt++;
		idx = rx->cnt & rx->mask;
		remainder -= MYRI10GE_ALLOC_SIZE;
	}

	/* remove padding */
	rx_frags[0].page_offset += MXGEFW_PAD;
	rx_frags[0].size -= MXGEFW_PAD;
	len -= MXGEFW_PAD;

	skb->len = len;
	skb->data_len = len;
	skb->truesize += len;
	if (dev->features & NETIF_F_RXCSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum;
	}
	myri10ge_vlan_rx(mgp->dev, va, skb);
	skb_record_rx_queue(skb, ss - &mgp->ss[0]);

	napi_gro_frags(&ss->napi);
	return 1;
}
static inline void
myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
{
	struct pci_dev *pdev = ss->mgp->pdev;
	struct myri10ge_tx_buf *tx = &ss->tx;
	struct netdev_queue *dev_queue;
	struct sk_buff *skb;
	int idx, len;

	while (tx->pkt_done != mcp_index) {
		idx = tx->done & tx->mask;
		skb = tx->info[idx].skb;

		/* Mark as free */
		tx->info[idx].skb = NULL;
		if (tx->info[idx].last) {
			tx->pkt_done++;
			tx->info[idx].last = 0;
		}
		tx->done++;
		len = dma_unmap_len(&tx->info[idx], len);
		dma_unmap_len_set(&tx->info[idx], len, 0);
		if (skb) {
			ss->stats.tx_bytes += skb->len;
			ss->stats.tx_packets++;
			dev_kfree_skb_irq(skb);
			if (len)
				pci_unmap_single(pdev,
						 dma_unmap_addr(&tx->info[idx],
								bus), len,
						 PCI_DMA_TODEVICE);
		} else {
			if (len)
				pci_unmap_page(pdev,
					       dma_unmap_addr(&tx->info[idx],
							      bus), len,
					       PCI_DMA_TODEVICE);
		}
	}

	dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss);
	/*
	 * Make a minimal effort to prevent the NIC from polling an
	 * idle tx queue.  If we can't get the lock we leave the queue
	 * active. In this case, either a thread was about to start
	 * using the queue anyway, or we lost a race and the NIC will
	 * waste some of its resources polling an inactive queue for a
	 * while.
	 */
	if ((ss->mgp->dev->real_num_tx_queues > 1) &&
	    __netif_tx_trylock(dev_queue)) {
		if (tx->req == tx->done) {
			tx->queue_active = 0;
			put_be32(htonl(1), tx->send_stop);
			mb();
			mmiowb();
		}
		__netif_tx_unlock(dev_queue);
	}

	/* start the queue if we've stopped it */
	if (netif_tx_queue_stopped(dev_queue) &&
	    tx->req - tx->done < (tx->mask >> 1) &&
	    ss->mgp->running == MYRI10GE_ETH_RUNNING) {
		tx->wake_queue++;
		netif_tx_wake_queue(dev_queue);
	}
}
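/*
 * Added overview: the firmware DMAs one mcp_slot per received frame
 * into the rx_done ring; a nonzero length field marks a slot as valid.
 * The loop below consumes valid slots up to the NAPI budget, zeroing
 * each length so the slot can be reused, and wraps idx with
 * max_intr_slots (a power of two).
 */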
static inline int
myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
{
	struct myri10ge_rx_done *rx_done = &ss->rx_done;
	struct myri10ge_priv *mgp = ss->mgp;
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long rx_ok;
	int idx = rx_done->idx;
	int cnt = rx_done->cnt;
	int work_done = 0;
	u16 length;
	__wsum checksum;

	while (rx_done->entry[idx].length != 0 && work_done < budget) {
		length = ntohs(rx_done->entry[idx].length);
		rx_done->entry[idx].length = 0;
		checksum = csum_unfold(rx_done->entry[idx].checksum);
		rx_ok = myri10ge_rx_done(ss, length, checksum);
		rx_packets += rx_ok;
		rx_bytes += rx_ok * (unsigned long)length;
		cnt++;
		idx = cnt & (mgp->max_intr_slots - 1);
		work_done++;
	}
	rx_done->idx = idx;
	rx_done->cnt = cnt;
	ss->stats.rx_packets += rx_packets;
	ss->stats.rx_bytes += rx_bytes;

	/* restock receive rings if needed */
	if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
		myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
					mgp->small_bytes + MXGEFW_PAD, 0);
	if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
		myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);

	return work_done;
}
static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
{
	struct mcp_irq_data *stats = mgp->ss[0].fw_stats;

	if (unlikely(stats->stats_updated)) {
		unsigned link_up = ntohl(stats->link_up);
		if (mgp->link_state != link_up) {
			mgp->link_state = link_up;

			if (mgp->link_state == MXGEFW_LINK_UP) {
				netif_info(mgp, link, mgp->dev, "link up\n");
				netif_carrier_on(mgp->dev);
				mgp->link_changes++;
			} else {
				netif_info(mgp, link, mgp->dev, "link %s\n",
					   (link_up == MXGEFW_LINK_MYRINET ?
					    "mismatch (Myrinet detected)" :
					    "down"));
				netif_carrier_off(mgp->dev);
				mgp->link_changes++;
			}
		}
		if (mgp->rdma_tags_available !=
		    ntohl(stats->rdma_tags_available)) {
			mgp->rdma_tags_available =
			    ntohl(stats->rdma_tags_available);
			netdev_warn(mgp->dev, "RDMA timed out! %d tags left\n",
				    mgp->rdma_tags_available);
		}
		mgp->down_cnt += stats->link_down;
		if (stats->link_down)
			wake_up(&mgp->down_wq);
	}
}

static int myri10ge_poll(struct napi_struct *napi, int budget)
{
	struct myri10ge_slice_state *ss =
	    container_of(napi, struct myri10ge_slice_state, napi);
	int work_done;

#ifdef CONFIG_MYRI10GE_DCA
	if (ss->mgp->dca_enabled)
		myri10ge_update_dca(ss);
#endif

	/* process as many rx events as NAPI will allow */
	work_done = myri10ge_clean_rx_done(ss, budget);

	if (work_done < budget) {
		napi_complete(napi);
		put_be32(htonl(3), ss->irq_claim);
	}
	return work_done;
}

static irqreturn_t myri10ge_intr(int irq, void *arg)
{
	struct myri10ge_slice_state *ss = arg;
	struct myri10ge_priv *mgp = ss->mgp;
	struct mcp_irq_data *stats = ss->fw_stats;
	struct myri10ge_tx_buf *tx = &ss->tx;
	u32 send_done_count;
	int i;

	/* an interrupt on a non-zero receive-only slice is implicitly
	 * valid since MSI-X irqs are not shared */
	if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
		napi_schedule(&ss->napi);
		return IRQ_HANDLED;
	}

	/* make sure it is our IRQ, and that the DMA has finished */
	if (unlikely(!stats->valid))
		return IRQ_NONE;

	/* low bit indicates receives are present, so schedule
	 * napi poll handler */
	if (stats->valid & 1)
		napi_schedule(&ss->napi);

	if (!mgp->msi_enabled && !mgp->msix_enabled) {
		put_be32(0, mgp->irq_deassert);
		if (!myri10ge_deassert_wait)
			stats->valid = 0;
		mb();
	} else
		stats->valid = 0;

	/* Wait for IRQ line to go low, if using INTx */
	i = 0;
	while (1) {
		i++;
		/* check for transmit completes and receives */
		send_done_count = ntohl(stats->send_done_count);
		if (send_done_count != tx->pkt_done)
			myri10ge_tx_done(ss, (int)send_done_count);
		if (unlikely(i > myri10ge_max_irq_loops)) {
			netdev_warn(mgp->dev, "irq stuck?\n");
			stats->valid = 0;
			schedule_work(&mgp->watchdog_work);
		}
		if (likely(stats->valid == 0))
			break;
		cpu_relax();
		barrier();
	}

	/* Only slice 0 updates stats */
	if (ss == mgp->ss)
		myri10ge_check_statblock(mgp);

	put_be32(htonl(3), ss->irq_claim + 1);
	return IRQ_HANDLED;
}

static int
myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	char *ptr;
	int i;

	cmd->autoneg = AUTONEG_DISABLE;
	ethtool_cmd_speed_set(cmd, SPEED_10000);
	cmd->duplex = DUPLEX_FULL;

	/*
	 * parse the product code to determine the interface type
	 * (CX4, XFP, Quad Ribbon Fiber) by looking at the character
	 * after the 3rd dash in the driver's cached copy of the
	 * EEPROM's product code string.
	 */
	ptr = mgp->product_code_string;
	if (ptr == NULL) {
		netdev_err(netdev, "Missing product code\n");
		return 0;
	}
	for (i = 0; i < 3; i++, ptr++) {
		ptr = strchr(ptr, '-');
		if (ptr == NULL) {
			netdev_err(netdev, "Invalid product code %s\n",
				   mgp->product_code_string);
			return 0;
		}
	}
	if (*ptr == '2')
		ptr++;
	if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
		/* We've found either an XFP, quad ribbon fiber, or SFP+ */
		cmd->port = PORT_FIBRE;
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;
	} else {
		cmd->port = PORT_OTHER;
	}
	if (*ptr == 'R' || *ptr == 'S')
		cmd->transceiver = XCVR_EXTERNAL;
	else
		cmd->transceiver = XCVR_INTERNAL;

	return 0;
}
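/*
 * Example (added): for a product code such as "10G-PCIE-8B-S", the loop
 * above leaves ptr at the character after the third dash ('S' here), so
 * the port is reported as fibre with an external transceiver; a leading
 * '2' (as on dual-port "-2S" codes) is skipped first.
 */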
static void
myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	strlcpy(info->driver, "myri10ge", sizeof(info->driver));
	strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
	strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
}

static int
myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	coal->rx_coalesce_usecs = mgp->intr_coal_delay;
	return 0;
}

static int
myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	mgp->intr_coal_delay = coal->rx_coalesce_usecs;
	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
	return 0;
}

static void
myri10ge_get_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *pause)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	pause->autoneg = 0;
	pause->rx_pause = mgp->pause;
	pause->tx_pause = mgp->pause;
}

static int
myri10ge_set_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *pause)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	if (pause->tx_pause != mgp->pause)
		return myri10ge_change_pause(mgp, pause->tx_pause);
	if (pause->rx_pause != mgp->pause)
		return myri10ge_change_pause(mgp, pause->rx_pause);
	if (pause->autoneg != 0)
		return -EINVAL;
	return 0;
}

static void
myri10ge_get_ringparam(struct net_device *netdev,
		       struct ethtool_ringparam *ring)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
	ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = mgp->ss[0].tx.mask + 1;
	ring->rx_mini_pending = ring->rx_mini_max_pending;
	ring->rx_pending = ring->rx_max_pending;
	ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}

static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_boundary", "WC", "irq", "MSI", "MSIX",
	"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
	"serial_number", "watchdog_resets",
#ifdef CONFIG_MYRI10GE_DCA
	"dca_capable_firmware", "dca_device_present",
#endif
	"link_changes", "link_up", "dropped_link_overflow",
	"dropped_link_error_or_filtered",
	"dropped_pause", "dropped_bad_phy", "dropped_bad_crc32",
	"dropped_unicast_filtered", "dropped_multicast_filtered",
	"dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
	"dropped_no_big_buffer"
};

static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
	"----------- slice ---------",
	"tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
	"rx_small_cnt", "rx_big_cnt",
	"wake_queue", "stop_queue", "tx_linearized",
};

#define MYRI10GE_NET_STATS_LEN 21
#define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats)
#define MYRI10GE_SLICE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_slice_stats)
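/*
 * Consistency note (added): the first MYRI10GE_NET_STATS_LEN (21)
 * strings above name the leading u64 fields of struct
 * rtnl_link_stats64, in order.  myri10ge_get_ethtool_stats() relies on
 * this when it copies ((u64 *)&link_stats)[i] for i < 21.
 */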
"tx_carrier_errors", "tx_fifo_errors", 1724 "tx_heartbeat_errors", "tx_window_errors", 1725 /* device-specific stats */ 1726 "tx_boundary", "WC", "irq", "MSI", "MSIX", 1727 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", 1728 "serial_number", "watchdog_resets", 1729 #ifdef CONFIG_MYRI10GE_DCA 1730 "dca_capable_firmware", "dca_device_present", 1731 #endif 1732 "link_changes", "link_up", "dropped_link_overflow", 1733 "dropped_link_error_or_filtered", 1734 "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32", 1735 "dropped_unicast_filtered", "dropped_multicast_filtered", 1736 "dropped_runt", "dropped_overrun", "dropped_no_small_buffer", 1737 "dropped_no_big_buffer" 1738 }; 1739 1740 static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = { 1741 "----------- slice ---------", 1742 "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done", 1743 "rx_small_cnt", "rx_big_cnt", 1744 "wake_queue", "stop_queue", "tx_linearized", 1745 }; 1746 1747 #define MYRI10GE_NET_STATS_LEN 21 1748 #define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats) 1749 #define MYRI10GE_SLICE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_slice_stats) 1750 1751 static void 1752 myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data) 1753 { 1754 struct myri10ge_priv *mgp = netdev_priv(netdev); 1755 int i; 1756 1757 switch (stringset) { 1758 case ETH_SS_STATS: 1759 memcpy(data, *myri10ge_gstrings_main_stats, 1760 sizeof(myri10ge_gstrings_main_stats)); 1761 data += sizeof(myri10ge_gstrings_main_stats); 1762 for (i = 0; i < mgp->num_slices; i++) { 1763 memcpy(data, *myri10ge_gstrings_slice_stats, 1764 sizeof(myri10ge_gstrings_slice_stats)); 1765 data += sizeof(myri10ge_gstrings_slice_stats); 1766 } 1767 break; 1768 } 1769 } 1770 1771 static int myri10ge_get_sset_count(struct net_device *netdev, int sset) 1772 { 1773 struct myri10ge_priv *mgp = netdev_priv(netdev); 1774 1775 switch (sset) { 1776 case ETH_SS_STATS: 1777 return MYRI10GE_MAIN_STATS_LEN + 1778 mgp->num_slices * MYRI10GE_SLICE_STATS_LEN; 1779 default: 1780 return -EOPNOTSUPP; 1781 } 1782 } 1783 1784 static void 1785 myri10ge_get_ethtool_stats(struct net_device *netdev, 1786 struct ethtool_stats *stats, u64 * data) 1787 { 1788 struct myri10ge_priv *mgp = netdev_priv(netdev); 1789 struct myri10ge_slice_state *ss; 1790 struct rtnl_link_stats64 link_stats; 1791 int slice; 1792 int i; 1793 1794 /* force stats update */ 1795 memset(&link_stats, 0, sizeof(link_stats)); 1796 (void)myri10ge_get_stats(netdev, &link_stats); 1797 for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) 1798 data[i] = ((u64 *)&link_stats)[i]; 1799 1800 data[i++] = (unsigned int)mgp->tx_boundary; 1801 data[i++] = (unsigned int)mgp->wc_enabled; 1802 data[i++] = (unsigned int)mgp->pdev->irq; 1803 data[i++] = (unsigned int)mgp->msi_enabled; 1804 data[i++] = (unsigned int)mgp->msix_enabled; 1805 data[i++] = (unsigned int)mgp->read_dma; 1806 data[i++] = (unsigned int)mgp->write_dma; 1807 data[i++] = (unsigned int)mgp->read_write_dma; 1808 data[i++] = (unsigned int)mgp->serial_number; 1809 data[i++] = (unsigned int)mgp->watchdog_resets; 1810 #ifdef CONFIG_MYRI10GE_DCA 1811 data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL); 1812 data[i++] = (unsigned int)(mgp->dca_enabled); 1813 #endif 1814 data[i++] = (unsigned int)mgp->link_changes; 1815 1816 /* firmware stats are useful only in the first slice */ 1817 ss = &mgp->ss[0]; 1818 data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up); 1819 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow); 1820 
data[i++] = 1821 (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered); 1822 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause); 1823 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy); 1824 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32); 1825 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered); 1826 data[i++] = 1827 (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered); 1828 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt); 1829 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun); 1830 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer); 1831 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer); 1832 1833 for (slice = 0; slice < mgp->num_slices; slice++) { 1834 ss = &mgp->ss[slice]; 1835 data[i++] = slice; 1836 data[i++] = (unsigned int)ss->tx.pkt_start; 1837 data[i++] = (unsigned int)ss->tx.pkt_done; 1838 data[i++] = (unsigned int)ss->tx.req; 1839 data[i++] = (unsigned int)ss->tx.done; 1840 data[i++] = (unsigned int)ss->rx_small.cnt; 1841 data[i++] = (unsigned int)ss->rx_big.cnt; 1842 data[i++] = (unsigned int)ss->tx.wake_queue; 1843 data[i++] = (unsigned int)ss->tx.stop_queue; 1844 data[i++] = (unsigned int)ss->tx.linearized; 1845 } 1846 } 1847 1848 static void myri10ge_set_msglevel(struct net_device *netdev, u32 value) 1849 { 1850 struct myri10ge_priv *mgp = netdev_priv(netdev); 1851 mgp->msg_enable = value; 1852 } 1853 1854 static u32 myri10ge_get_msglevel(struct net_device *netdev) 1855 { 1856 struct myri10ge_priv *mgp = netdev_priv(netdev); 1857 return mgp->msg_enable; 1858 } 1859 1860 /* 1861 * Use a low-level command to change the LED behavior. Rather than 1862 * blinking (which is the normal case), when identify is used, the 1863 * yellow LED turns solid. 
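 *
 * Concretely (as the code below suggests): writing the 0xfffffffe
 * pattern into the firmware header's led_pattern word appears to
 * hold the LED solid, and turning identify off restores the default
 * blink pattern, which is read back from the word that follows
 * led_pattern.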
1864 */ 1865 static int myri10ge_led(struct myri10ge_priv *mgp, int on) 1866 { 1867 struct mcp_gen_header *hdr; 1868 struct device *dev = &mgp->pdev->dev; 1869 size_t hdr_off, pattern_off, hdr_len; 1870 u32 pattern = 0xfffffffe; 1871 1872 /* find running firmware header */ 1873 hdr_off = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET)); 1874 if ((hdr_off & 3) || hdr_off + sizeof(*hdr) > mgp->sram_size) { 1875 dev_err(dev, "Running firmware has bad header offset (%d)\n", 1876 (int)hdr_off); 1877 return -EIO; 1878 } 1879 hdr_len = swab32(readl(mgp->sram + hdr_off + 1880 offsetof(struct mcp_gen_header, header_length))); 1881 pattern_off = hdr_off + offsetof(struct mcp_gen_header, led_pattern); 1882 if (pattern_off >= (hdr_len + hdr_off)) { 1883 dev_info(dev, "Firmware does not support LED identification\n"); 1884 return -EINVAL; 1885 } 1886 if (!on) 1887 pattern = swab32(readl(mgp->sram + pattern_off + 4)); 1888 writel(swab32(pattern), mgp->sram + pattern_off); 1889 return 0; 1890 } 1891 1892 static int 1893 myri10ge_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) 1894 { 1895 struct myri10ge_priv *mgp = netdev_priv(netdev); 1896 int rc; 1897 1898 switch (state) { 1899 case ETHTOOL_ID_ACTIVE: 1900 rc = myri10ge_led(mgp, 1); 1901 break; 1902 1903 case ETHTOOL_ID_INACTIVE: 1904 rc = myri10ge_led(mgp, 0); 1905 break; 1906 1907 default: 1908 rc = -EINVAL; 1909 } 1910 1911 return rc; 1912 } 1913 1914 static const struct ethtool_ops myri10ge_ethtool_ops = { 1915 .get_settings = myri10ge_get_settings, 1916 .get_drvinfo = myri10ge_get_drvinfo, 1917 .get_coalesce = myri10ge_get_coalesce, 1918 .set_coalesce = myri10ge_set_coalesce, 1919 .get_pauseparam = myri10ge_get_pauseparam, 1920 .set_pauseparam = myri10ge_set_pauseparam, 1921 .get_ringparam = myri10ge_get_ringparam, 1922 .get_link = ethtool_op_get_link, 1923 .get_strings = myri10ge_get_strings, 1924 .get_sset_count = myri10ge_get_sset_count, 1925 .get_ethtool_stats = myri10ge_get_ethtool_stats, 1926 .set_msglevel = myri10ge_set_msglevel, 1927 .get_msglevel = myri10ge_get_msglevel, 1928 .set_phys_id = myri10ge_phys_id, 1929 }; 1930 1931 static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss) 1932 { 1933 struct myri10ge_priv *mgp = ss->mgp; 1934 struct myri10ge_cmd cmd; 1935 struct net_device *dev = mgp->dev; 1936 int tx_ring_size, rx_ring_size; 1937 int tx_ring_entries, rx_ring_entries; 1938 int i, slice, status; 1939 size_t bytes; 1940 1941 /* get ring sizes */ 1942 slice = ss - mgp->ss; 1943 cmd.data0 = slice; 1944 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0); 1945 tx_ring_size = cmd.data0; 1946 cmd.data0 = slice; 1947 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); 1948 if (status != 0) 1949 return status; 1950 rx_ring_size = cmd.data0; 1951 1952 tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send); 1953 rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr); 1954 ss->tx.mask = tx_ring_entries - 1; 1955 ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1; 1956 1957 status = -ENOMEM; 1958 1959 /* allocate the host shadow rings */ 1960 1961 bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4) 1962 * sizeof(*ss->tx.req_list); 1963 ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); 1964 if (ss->tx.req_bytes == NULL) 1965 goto abort_with_nothing; 1966 1967 /* ensure req_list entries are aligned to 8 bytes */ 1968 ss->tx.req_list = (struct mcp_kreq_ether_send *) 1969 ALIGN((unsigned long)ss->tx.req_bytes, 8); 1970 ss->tx.queue_active = 0; 1971 1972 bytes 
= rx_ring_entries * sizeof(*ss->rx_small.shadow); 1973 ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); 1974 if (ss->rx_small.shadow == NULL) 1975 goto abort_with_tx_req_bytes; 1976 1977 bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow); 1978 ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); 1979 if (ss->rx_big.shadow == NULL) 1980 goto abort_with_rx_small_shadow; 1981 1982 /* allocate the host info rings */ 1983 1984 bytes = tx_ring_entries * sizeof(*ss->tx.info); 1985 ss->tx.info = kzalloc(bytes, GFP_KERNEL); 1986 if (ss->tx.info == NULL) 1987 goto abort_with_rx_big_shadow; 1988 1989 bytes = rx_ring_entries * sizeof(*ss->rx_small.info); 1990 ss->rx_small.info = kzalloc(bytes, GFP_KERNEL); 1991 if (ss->rx_small.info == NULL) 1992 goto abort_with_tx_info; 1993 1994 bytes = rx_ring_entries * sizeof(*ss->rx_big.info); 1995 ss->rx_big.info = kzalloc(bytes, GFP_KERNEL); 1996 if (ss->rx_big.info == NULL) 1997 goto abort_with_rx_small_info; 1998 1999 /* Fill the receive rings */ 2000 ss->rx_big.cnt = 0; 2001 ss->rx_small.cnt = 0; 2002 ss->rx_big.fill_cnt = 0; 2003 ss->rx_small.fill_cnt = 0; 2004 ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; 2005 ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; 2006 ss->rx_small.watchdog_needed = 0; 2007 ss->rx_big.watchdog_needed = 0; 2008 if (mgp->small_bytes == 0) { 2009 ss->rx_small.fill_cnt = ss->rx_small.mask + 1; 2010 } else { 2011 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, 2012 mgp->small_bytes + MXGEFW_PAD, 0); 2013 } 2014 2015 if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) { 2016 netdev_err(dev, "slice-%d: alloced only %d small bufs\n", 2017 slice, ss->rx_small.fill_cnt); 2018 goto abort_with_rx_small_ring; 2019 } 2020 2021 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); 2022 if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) { 2023 netdev_err(dev, "slice-%d: alloced only %d big bufs\n", 2024 slice, ss->rx_big.fill_cnt); 2025 goto abort_with_rx_big_ring; 2026 } 2027 2028 return 0; 2029 2030 abort_with_rx_big_ring: 2031 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { 2032 int idx = i & ss->rx_big.mask; 2033 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], 2034 mgp->big_bytes); 2035 put_page(ss->rx_big.info[idx].page); 2036 } 2037 2038 abort_with_rx_small_ring: 2039 if (mgp->small_bytes == 0) 2040 ss->rx_small.fill_cnt = ss->rx_small.cnt; 2041 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { 2042 int idx = i & ss->rx_small.mask; 2043 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], 2044 mgp->small_bytes + MXGEFW_PAD); 2045 put_page(ss->rx_small.info[idx].page); 2046 } 2047 2048 kfree(ss->rx_big.info); 2049 2050 abort_with_rx_small_info: 2051 kfree(ss->rx_small.info); 2052 2053 abort_with_tx_info: 2054 kfree(ss->tx.info); 2055 2056 abort_with_rx_big_shadow: 2057 kfree(ss->rx_big.shadow); 2058 2059 abort_with_rx_small_shadow: 2060 kfree(ss->rx_small.shadow); 2061 2062 abort_with_tx_req_bytes: 2063 kfree(ss->tx.req_bytes); 2064 ss->tx.req_bytes = NULL; 2065 ss->tx.req_list = NULL; 2066 2067 abort_with_nothing: 2068 return status; 2069 } 2070 2071 static void myri10ge_free_rings(struct myri10ge_slice_state *ss) 2072 { 2073 struct myri10ge_priv *mgp = ss->mgp; 2074 struct sk_buff *skb; 2075 struct myri10ge_tx_buf *tx; 2076 int i, len, idx; 2077 2078 /* If not allocated, skip it */ 2079 if (ss->tx.req_list == NULL) 2080 return; 2081 2082 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { 2083 idx = i & ss->rx_big.mask; 2084 if (i == ss->rx_big.fill_cnt - 1) 2085 ss->rx_big.info[idx].page_offset = 
MYRI10GE_ALLOC_SIZE; 2086 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], 2087 mgp->big_bytes); 2088 put_page(ss->rx_big.info[idx].page); 2089 } 2090 2091 if (mgp->small_bytes == 0) 2092 ss->rx_small.fill_cnt = ss->rx_small.cnt; 2093 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { 2094 idx = i & ss->rx_small.mask; 2095 if (i == ss->rx_small.fill_cnt - 1) 2096 ss->rx_small.info[idx].page_offset = 2097 MYRI10GE_ALLOC_SIZE; 2098 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], 2099 mgp->small_bytes + MXGEFW_PAD); 2100 put_page(ss->rx_small.info[idx].page); 2101 } 2102 tx = &ss->tx; 2103 while (tx->done != tx->req) { 2104 idx = tx->done & tx->mask; 2105 skb = tx->info[idx].skb; 2106 2107 /* Mark as free */ 2108 tx->info[idx].skb = NULL; 2109 tx->done++; 2110 len = dma_unmap_len(&tx->info[idx], len); 2111 dma_unmap_len_set(&tx->info[idx], len, 0); 2112 if (skb) { 2113 ss->stats.tx_dropped++; 2114 dev_kfree_skb_any(skb); 2115 if (len) 2116 pci_unmap_single(mgp->pdev, 2117 dma_unmap_addr(&tx->info[idx], 2118 bus), len, 2119 PCI_DMA_TODEVICE); 2120 } else { 2121 if (len) 2122 pci_unmap_page(mgp->pdev, 2123 dma_unmap_addr(&tx->info[idx], 2124 bus), len, 2125 PCI_DMA_TODEVICE); 2126 } 2127 } 2128 kfree(ss->rx_big.info); 2129 2130 kfree(ss->rx_small.info); 2131 2132 kfree(ss->tx.info); 2133 2134 kfree(ss->rx_big.shadow); 2135 2136 kfree(ss->rx_small.shadow); 2137 2138 kfree(ss->tx.req_bytes); 2139 ss->tx.req_bytes = NULL; 2140 ss->tx.req_list = NULL; 2141 } 2142 2143 static int myri10ge_request_irq(struct myri10ge_priv *mgp) 2144 { 2145 struct pci_dev *pdev = mgp->pdev; 2146 struct myri10ge_slice_state *ss; 2147 struct net_device *netdev = mgp->dev; 2148 int i; 2149 int status; 2150 2151 mgp->msi_enabled = 0; 2152 mgp->msix_enabled = 0; 2153 status = 0; 2154 if (myri10ge_msi) { 2155 if (mgp->num_slices > 1) { 2156 status = 2157 pci_enable_msix(pdev, mgp->msix_vectors, 2158 mgp->num_slices); 2159 if (status == 0) { 2160 mgp->msix_enabled = 1; 2161 } else { 2162 dev_err(&pdev->dev, 2163 "Error %d setting up MSI-X\n", status); 2164 return status; 2165 } 2166 } 2167 if (mgp->msix_enabled == 0) { 2168 status = pci_enable_msi(pdev); 2169 if (status != 0) { 2170 dev_err(&pdev->dev, 2171 "Error %d setting up MSI; falling back to xPIC\n", 2172 status); 2173 } else { 2174 mgp->msi_enabled = 1; 2175 } 2176 } 2177 } 2178 if (mgp->msix_enabled) { 2179 for (i = 0; i < mgp->num_slices; i++) { 2180 ss = &mgp->ss[i]; 2181 snprintf(ss->irq_desc, sizeof(ss->irq_desc), 2182 "%s:slice-%d", netdev->name, i); 2183 status = request_irq(mgp->msix_vectors[i].vector, 2184 myri10ge_intr, 0, ss->irq_desc, 2185 ss); 2186 if (status != 0) { 2187 dev_err(&pdev->dev, 2188 "slice %d failed to allocate IRQ\n", i); 2189 i--; 2190 while (i >= 0) { 2191 free_irq(mgp->msix_vectors[i].vector, 2192 &mgp->ss[i]); 2193 i--; 2194 } 2195 pci_disable_msix(pdev); 2196 return status; 2197 } 2198 } 2199 } else { 2200 status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED, 2201 mgp->dev->name, &mgp->ss[0]); 2202 if (status != 0) { 2203 dev_err(&pdev->dev, "failed to allocate IRQ\n"); 2204 if (mgp->msi_enabled) 2205 pci_disable_msi(pdev); 2206 } 2207 } 2208 return status; 2209 } 2210 2211 static void myri10ge_free_irq(struct myri10ge_priv *mgp) 2212 { 2213 struct pci_dev *pdev = mgp->pdev; 2214 int i; 2215 2216 if (mgp->msix_enabled) { 2217 for (i = 0; i < mgp->num_slices; i++) 2218 free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]); 2219 } else { 2220 free_irq(pdev->irq, &mgp->ss[0]); 2221 } 2222 if 
(mgp->msi_enabled) 2223 pci_disable_msi(pdev); 2224 if (mgp->msix_enabled) 2225 pci_disable_msix(pdev); 2226 } 2227 2228 static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice) 2229 { 2230 struct myri10ge_cmd cmd; 2231 struct myri10ge_slice_state *ss; 2232 int status; 2233 2234 ss = &mgp->ss[slice]; 2235 status = 0; 2236 if (slice == 0 || (mgp->dev->real_num_tx_queues > 1)) { 2237 cmd.data0 = slice; 2238 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, 2239 &cmd, 0); 2240 ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *) 2241 (mgp->sram + cmd.data0); 2242 } 2243 cmd.data0 = slice; 2244 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, 2245 &cmd, 0); 2246 ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *) 2247 (mgp->sram + cmd.data0); 2248 2249 cmd.data0 = slice; 2250 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0); 2251 ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *) 2252 (mgp->sram + cmd.data0); 2253 2254 ss->tx.send_go = (__iomem __be32 *) 2255 (mgp->sram + MXGEFW_ETH_SEND_GO + 64 * slice); 2256 ss->tx.send_stop = (__iomem __be32 *) 2257 (mgp->sram + MXGEFW_ETH_SEND_STOP + 64 * slice); 2258 return status; 2259 2260 } 2261 2262 static int myri10ge_set_stats(struct myri10ge_priv *mgp, int slice) 2263 { 2264 struct myri10ge_cmd cmd; 2265 struct myri10ge_slice_state *ss; 2266 int status; 2267 2268 ss = &mgp->ss[slice]; 2269 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus); 2270 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus); 2271 cmd.data2 = sizeof(struct mcp_irq_data) | (slice << 16); 2272 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0); 2273 if (status == -ENOSYS) { 2274 dma_addr_t bus = ss->fw_stats_bus; 2275 if (slice != 0) 2276 return -EINVAL; 2277 bus += offsetof(struct mcp_irq_data, send_done_count); 2278 cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus); 2279 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus); 2280 status = myri10ge_send_cmd(mgp, 2281 MXGEFW_CMD_SET_STATS_DMA_OBSOLETE, 2282 &cmd, 0); 2283 /* Firmware cannot support multicast without STATS_DMA_V2 */ 2284 mgp->fw_multicast_support = 0; 2285 } else { 2286 mgp->fw_multicast_support = 1; 2287 } 2288 return 0; 2289 } 2290 2291 static int myri10ge_open(struct net_device *dev) 2292 { 2293 struct myri10ge_slice_state *ss; 2294 struct myri10ge_priv *mgp = netdev_priv(dev); 2295 struct myri10ge_cmd cmd; 2296 int i, status, big_pow2, slice; 2297 u8 __iomem *itable; 2298 2299 if (mgp->running != MYRI10GE_ETH_STOPPED) 2300 return -EBUSY; 2301 2302 mgp->running = MYRI10GE_ETH_STARTING; 2303 status = myri10ge_reset(mgp); 2304 if (status != 0) { 2305 netdev_err(dev, "failed reset\n"); 2306 goto abort_with_nothing; 2307 } 2308 2309 if (mgp->num_slices > 1) { 2310 cmd.data0 = mgp->num_slices; 2311 cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE; 2312 if (mgp->dev->real_num_tx_queues > 1) 2313 cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES; 2314 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES, 2315 &cmd, 0); 2316 if (status != 0) { 2317 netdev_err(dev, "failed to set number of slices\n"); 2318 goto abort_with_nothing; 2319 } 2320 /* setup the indirection table */ 2321 cmd.data0 = mgp->num_slices; 2322 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_TABLE_SIZE, 2323 &cmd, 0); 2324 2325 status |= myri10ge_send_cmd(mgp, 2326 MXGEFW_CMD_GET_RSS_TABLE_OFFSET, 2327 &cmd, 0); 2328 if (status != 0) { 2329 netdev_err(dev, "failed to setup rss tables\n"); 2330 goto abort_with_nothing; 2331 } 2332 2333 /* just enable an 
identity mapping */ 2334 itable = mgp->sram + cmd.data0; 2335 for (i = 0; i < mgp->num_slices; i++) 2336 __raw_writeb(i, &itable[i]); 2337 2338 cmd.data0 = 1; 2339 cmd.data1 = myri10ge_rss_hash; 2340 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE, 2341 &cmd, 0); 2342 if (status != 0) { 2343 netdev_err(dev, "failed to enable slices\n"); 2344 goto abort_with_nothing; 2345 } 2346 } 2347 2348 status = myri10ge_request_irq(mgp); 2349 if (status != 0) 2350 goto abort_with_nothing; 2351 2352 /* decide what small buffer size to use. For good TCP rx 2353 * performance, it is important to not receive 1514 byte 2354 * frames into jumbo buffers, as it confuses the socket buffer 2355 * accounting code, leading to drops and erratic performance. 2356 */ 2357 2358 if (dev->mtu <= ETH_DATA_LEN) 2359 /* enough for a TCP header */ 2360 mgp->small_bytes = (128 > SMP_CACHE_BYTES) 2361 ? (128 - MXGEFW_PAD) 2362 : (SMP_CACHE_BYTES - MXGEFW_PAD); 2363 else 2364 /* enough for a vlan encapsulated ETH_DATA_LEN frame */ 2365 mgp->small_bytes = VLAN_ETH_FRAME_LEN; 2366 2367 /* Override the small buffer size? */ 2368 if (myri10ge_small_bytes >= 0) 2369 mgp->small_bytes = myri10ge_small_bytes; 2370 2371 /* Firmware needs the big buff size as a power of 2. Lie and 2372 * tell him the buffer is larger, because we only use 1 2373 * buffer/pkt, and the mtu will prevent overruns. 2374 */ 2375 big_pow2 = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD; 2376 if (big_pow2 < MYRI10GE_ALLOC_SIZE / 2) { 2377 while (!is_power_of_2(big_pow2)) 2378 big_pow2++; 2379 mgp->big_bytes = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD; 2380 } else { 2381 big_pow2 = MYRI10GE_ALLOC_SIZE; 2382 mgp->big_bytes = big_pow2; 2383 } 2384 2385 /* setup the per-slice data structures */ 2386 for (slice = 0; slice < mgp->num_slices; slice++) { 2387 ss = &mgp->ss[slice]; 2388 2389 status = myri10ge_get_txrx(mgp, slice); 2390 if (status != 0) { 2391 netdev_err(dev, "failed to get ring sizes or locations\n"); 2392 goto abort_with_rings; 2393 } 2394 status = myri10ge_allocate_rings(ss); 2395 if (status != 0) 2396 goto abort_with_rings; 2397 2398 /* only firmware which supports multiple TX queues 2399 * supports setting up the tx stats on non-zero 2400 * slices */ 2401 if (slice == 0 || mgp->dev->real_num_tx_queues > 1) 2402 status = myri10ge_set_stats(mgp, slice); 2403 if (status) { 2404 netdev_err(dev, "Couldn't set stats DMA\n"); 2405 goto abort_with_rings; 2406 } 2407 2408 /* must happen prior to any irq */ 2409 napi_enable(&(ss)->napi); 2410 } 2411 2412 /* now give firmware buffers sizes, and MTU */ 2413 cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN; 2414 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0); 2415 cmd.data0 = mgp->small_bytes; 2416 status |= 2417 myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd, 0); 2418 cmd.data0 = big_pow2; 2419 status |= 2420 myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0); 2421 if (status) { 2422 netdev_err(dev, "Couldn't set buffer sizes\n"); 2423 goto abort_with_rings; 2424 } 2425 2426 /* 2427 * Set Linux style TSO mode; this is needed only on newer 2428 * firmware versions. 
Older versions default to Linux 2429 * style TSO 2430 */ 2431 cmd.data0 = 0; 2432 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0); 2433 if (status && status != -ENOSYS) { 2434 netdev_err(dev, "Couldn't set TSO mode\n"); 2435 goto abort_with_rings; 2436 } 2437 2438 mgp->link_state = ~0U; 2439 mgp->rdma_tags_available = 15; 2440 2441 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); 2442 if (status) { 2443 netdev_err(dev, "Couldn't bring up link\n"); 2444 goto abort_with_rings; 2445 } 2446 2447 mgp->running = MYRI10GE_ETH_RUNNING; 2448 mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ; 2449 add_timer(&mgp->watchdog_timer); 2450 netif_tx_wake_all_queues(dev); 2451 2452 return 0; 2453 2454 abort_with_rings: 2455 while (slice) { 2456 slice--; 2457 napi_disable(&mgp->ss[slice].napi); 2458 } 2459 for (i = 0; i < mgp->num_slices; i++) 2460 myri10ge_free_rings(&mgp->ss[i]); 2461 2462 myri10ge_free_irq(mgp); 2463 2464 abort_with_nothing: 2465 mgp->running = MYRI10GE_ETH_STOPPED; 2466 return -ENOMEM; 2467 } 2468 2469 static int myri10ge_close(struct net_device *dev) 2470 { 2471 struct myri10ge_priv *mgp = netdev_priv(dev); 2472 struct myri10ge_cmd cmd; 2473 int status, old_down_cnt; 2474 int i; 2475 2476 if (mgp->running != MYRI10GE_ETH_RUNNING) 2477 return 0; 2478 2479 if (mgp->ss[0].tx.req_bytes == NULL) 2480 return 0; 2481 2482 del_timer_sync(&mgp->watchdog_timer); 2483 mgp->running = MYRI10GE_ETH_STOPPING; 2484 for (i = 0; i < mgp->num_slices; i++) { 2485 napi_disable(&mgp->ss[i].napi); 2486 } 2487 netif_carrier_off(dev); 2488 2489 netif_tx_stop_all_queues(dev); 2490 if (mgp->rebooted == 0) { 2491 old_down_cnt = mgp->down_cnt; 2492 mb(); 2493 status = 2494 myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0); 2495 if (status) 2496 netdev_err(dev, "Couldn't bring down link\n"); 2497 2498 wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt, 2499 HZ); 2500 if (old_down_cnt == mgp->down_cnt) 2501 netdev_err(dev, "never got down irq\n"); 2502 } 2503 netif_tx_disable(dev); 2504 myri10ge_free_irq(mgp); 2505 for (i = 0; i < mgp->num_slices; i++) 2506 myri10ge_free_rings(&mgp->ss[i]); 2507 2508 mgp->running = MYRI10GE_ETH_STOPPED; 2509 return 0; 2510 } 2511 2512 /* copy an array of struct mcp_kreq_ether_send's to the mcp. Copy 2513 * backwards one at a time and handle ring wraps */ 2514 2515 static inline void 2516 myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx, 2517 struct mcp_kreq_ether_send *src, int cnt) 2518 { 2519 int idx, starting_slot; 2520 starting_slot = tx->req; 2521 while (cnt > 1) { 2522 cnt--; 2523 idx = (starting_slot + cnt) & tx->mask; 2524 myri10ge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src)); 2525 mb(); 2526 } 2527 } 2528 2529 /* 2530 * copy an array of struct mcp_kreq_ether_send's to the mcp. Copy 2531 * at most 32 bytes at a time, so as to avoid involving the software 2532 * pio handler in the nic. We re-write the first segment's flags 2533 * to mark them valid only after writing the entire chain. 
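 *
 * Each mcp_kreq_ether_send is a 16-byte block, so the aligned fast
 * path below copies descriptors in pairs (32 bytes) with a memory
 * barrier after each burst.  Clearing src->flags up front and
 * re-writing the final 32 bits only at the end keeps the NIC from
 * seeing a valid first descriptor before the whole chain has been
 * written.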
2534 */ 2535 2536 static inline void 2537 myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src, 2538 int cnt) 2539 { 2540 int idx, i; 2541 struct mcp_kreq_ether_send __iomem *dstp, *dst; 2542 struct mcp_kreq_ether_send *srcp; 2543 u8 last_flags; 2544 2545 idx = tx->req & tx->mask; 2546 2547 last_flags = src->flags; 2548 src->flags = 0; 2549 mb(); 2550 dst = dstp = &tx->lanai[idx]; 2551 srcp = src; 2552 2553 if ((idx + cnt) < tx->mask) { 2554 for (i = 0; i < (cnt - 1); i += 2) { 2555 myri10ge_pio_copy(dstp, srcp, 2 * sizeof(*src)); 2556 mb(); /* force write every 32 bytes */ 2557 srcp += 2; 2558 dstp += 2; 2559 } 2560 } else { 2561 /* submit all but the first request, and ensure 2562 * that it is submitted below */ 2563 myri10ge_submit_req_backwards(tx, src, cnt); 2564 i = 0; 2565 } 2566 if (i < cnt) { 2567 /* submit the first request */ 2568 myri10ge_pio_copy(dstp, srcp, sizeof(*src)); 2569 mb(); /* barrier before setting valid flag */ 2570 } 2571 2572 /* re-write the last 32-bits with the valid flags */ 2573 src->flags = last_flags; 2574 put_be32(*((__be32 *) src + 3), (__be32 __iomem *) dst + 3); 2575 tx->req += cnt; 2576 mb(); 2577 } 2578 2579 /* 2580 * Transmit a packet. We need to split the packet so that a single 2581 * segment does not cross myri10ge->tx_boundary, so this makes segment 2582 * counting tricky. So rather than try to count segments up front, we 2583 * just give up if there are too few segments to hold a reasonably 2584 * fragmented packet currently available. If we run 2585 * out of segments while preparing a packet for DMA, we just linearize 2586 * it and try again. 2587 */ 2588 2589 static netdev_tx_t myri10ge_xmit(struct sk_buff *skb, 2590 struct net_device *dev) 2591 { 2592 struct myri10ge_priv *mgp = netdev_priv(dev); 2593 struct myri10ge_slice_state *ss; 2594 struct mcp_kreq_ether_send *req; 2595 struct myri10ge_tx_buf *tx; 2596 struct skb_frag_struct *frag; 2597 struct netdev_queue *netdev_queue; 2598 dma_addr_t bus; 2599 u32 low; 2600 __be32 high_swapped; 2601 unsigned int len; 2602 int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments; 2603 u16 pseudo_hdr_offset, cksum_offset, queue; 2604 int cum_len, seglen, boundary, rdma_count; 2605 u8 flags, odd_flag; 2606 2607 queue = skb_get_queue_mapping(skb); 2608 ss = &mgp->ss[queue]; 2609 netdev_queue = netdev_get_tx_queue(mgp->dev, queue); 2610 tx = &ss->tx; 2611 2612 again: 2613 req = tx->req_list; 2614 avail = tx->mask - 1 - (tx->req - tx->done); 2615 2616 mss = 0; 2617 max_segments = MXGEFW_MAX_SEND_DESC; 2618 2619 if (skb_is_gso(skb)) { 2620 mss = skb_shinfo(skb)->gso_size; 2621 max_segments = MYRI10GE_MAX_SEND_DESC_TSO; 2622 } 2623 2624 if ((unlikely(avail < max_segments))) { 2625 /* we are out of transmit resources */ 2626 tx->stop_queue++; 2627 netif_tx_stop_queue(netdev_queue); 2628 return NETDEV_TX_BUSY; 2629 } 2630 2631 /* Setup checksum offloading, if needed */ 2632 cksum_offset = 0; 2633 pseudo_hdr_offset = 0; 2634 odd_flag = 0; 2635 flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST); 2636 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 2637 cksum_offset = skb_checksum_start_offset(skb); 2638 pseudo_hdr_offset = cksum_offset + skb->csum_offset; 2639 /* If the headers are excessively large, then we must 2640 * fall back to a software checksum */ 2641 if (unlikely(!mss && (cksum_offset > 255 || 2642 pseudo_hdr_offset > 127))) { 2643 if (skb_checksum_help(skb)) 2644 goto drop; 2645 cksum_offset = 0; 2646 pseudo_hdr_offset = 0; 2647 } else { 2648 odd_flag = 
MXGEFW_FLAGS_ALIGN_ODD; 2649 flags |= MXGEFW_FLAGS_CKSUM; 2650 } 2651 } 2652 2653 cum_len = 0; 2654 2655 if (mss) { /* TSO */ 2656 /* this removes any CKSUM flag from before */ 2657 flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST); 2658 2659 /* negative cum_len signifies to the 2660 * send loop that we are still in the 2661 * header portion of the TSO packet. 2662 * TSO header can be at most 1KB long */ 2663 cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb)); 2664 2665 /* for IPv6 TSO, the checksum offset stores the 2666 * TCP header length, to save the firmware from 2667 * the need to parse the headers */ 2668 if (skb_is_gso_v6(skb)) { 2669 cksum_offset = tcp_hdrlen(skb); 2670 /* Can only handle headers <= max_tso6 long */ 2671 if (unlikely(-cum_len > mgp->max_tso6)) 2672 return myri10ge_sw_tso(skb, dev); 2673 } 2674 /* for TSO, pseudo_hdr_offset holds mss. 2675 * The firmware figures out where to put 2676 * the checksum by parsing the header. */ 2677 pseudo_hdr_offset = mss; 2678 } else 2679 /* Mark small packets, and pad out tiny packets */ 2680 if (skb->len <= MXGEFW_SEND_SMALL_SIZE) { 2681 flags |= MXGEFW_FLAGS_SMALL; 2682 2683 /* pad frames to at least ETH_ZLEN bytes */ 2684 if (unlikely(skb->len < ETH_ZLEN)) { 2685 if (skb_padto(skb, ETH_ZLEN)) { 2686 /* The packet is gone, so we must 2687 * return 0 */ 2688 ss->stats.tx_dropped += 1; 2689 return NETDEV_TX_OK; 2690 } 2691 /* adjust the len to account for the zero pad 2692 * so that the nic can know how long it is */ 2693 skb->len = ETH_ZLEN; 2694 } 2695 } 2696 2697 /* map the skb for DMA */ 2698 len = skb_headlen(skb); 2699 idx = tx->req & tx->mask; 2700 tx->info[idx].skb = skb; 2701 bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE); 2702 dma_unmap_addr_set(&tx->info[idx], bus, bus); 2703 dma_unmap_len_set(&tx->info[idx], len, len); 2704 2705 frag_cnt = skb_shinfo(skb)->nr_frags; 2706 frag_idx = 0; 2707 count = 0; 2708 rdma_count = 0; 2709 2710 /* "rdma_count" is the number of RDMAs belonging to the 2711 * current packet BEFORE the current send request. For 2712 * non-TSO packets, this is equal to "count". 2713 * For TSO packets, rdma_count needs to be reset 2714 * to 0 after a segment cut. 2715 * 2716 * The rdma_count field of the send request is 2717 * the number of RDMAs of the packet starting at 2718 * that request. For TSO send requests with one or more cuts 2719 * in the middle, this is the number of RDMAs starting 2720 * after the last cut in the request. All previous 2721 * segments before the last cut implicitly have 1 RDMA. 2722 * 2723 * Since the number of RDMAs is not known beforehand, 2724 * it must be filled-in retroactively - after each 2725 * segmentation cut or at the end of the entire packet.
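 *
 * A worked example of the bit arithmetic used below: on a cut in
 * the middle of a descriptor (chop == 1, next_is_first == 0),
 * rdma_count |= -(1 | 0) forces rdma_count to -1 and
 * rdma_count += (1 & ~0) raises it to 0, so the rdma_count++ at
 * the bottom of the loop leaves it at 1: the split descriptor is
 * counted as the first RDMA of the new segment.  On a cut that
 * lands exactly on a descriptor boundary (next_is_first == 1),
 * rdma_count is pinned at -1 and the increment at the bottom of
 * the loop restarts the count at 0.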
2726 */ 2727 2728 while (1) { 2729 /* Break the SKB or Fragment up into pieces which 2730 * do not cross mgp->tx_boundary */ 2731 low = MYRI10GE_LOWPART_TO_U32(bus); 2732 high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus)); 2733 while (len) { 2734 u8 flags_next; 2735 int cum_len_next; 2736 2737 if (unlikely(count == max_segments)) 2738 goto abort_linearize; 2739 2740 boundary = 2741 (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1); 2742 seglen = boundary - low; 2743 if (seglen > len) 2744 seglen = len; 2745 flags_next = flags & ~MXGEFW_FLAGS_FIRST; 2746 cum_len_next = cum_len + seglen; 2747 if (mss) { /* TSO */ 2748 (req - rdma_count)->rdma_count = rdma_count + 1; 2749 2750 if (likely(cum_len >= 0)) { /* payload */ 2751 int next_is_first, chop; 2752 2753 chop = (cum_len_next > mss); 2754 cum_len_next = cum_len_next % mss; 2755 next_is_first = (cum_len_next == 0); 2756 flags |= chop * MXGEFW_FLAGS_TSO_CHOP; 2757 flags_next |= next_is_first * 2758 MXGEFW_FLAGS_FIRST; 2759 rdma_count |= -(chop | next_is_first); 2760 rdma_count += chop & ~next_is_first; 2761 } else if (likely(cum_len_next >= 0)) { /* header ends */ 2762 int small; 2763 2764 rdma_count = -1; 2765 cum_len_next = 0; 2766 seglen = -cum_len; 2767 small = (mss <= MXGEFW_SEND_SMALL_SIZE); 2768 flags_next = MXGEFW_FLAGS_TSO_PLD | 2769 MXGEFW_FLAGS_FIRST | 2770 (small * MXGEFW_FLAGS_SMALL); 2771 } 2772 } 2773 req->addr_high = high_swapped; 2774 req->addr_low = htonl(low); 2775 req->pseudo_hdr_offset = htons(pseudo_hdr_offset); 2776 req->pad = 0; /* complete solid 16-byte block; does this matter? */ 2777 req->rdma_count = 1; 2778 req->length = htons(seglen); 2779 req->cksum_offset = cksum_offset; 2780 req->flags = flags | ((cum_len & 1) * odd_flag); 2781 2782 low += seglen; 2783 len -= seglen; 2784 cum_len = cum_len_next; 2785 flags = flags_next; 2786 req++; 2787 count++; 2788 rdma_count++; 2789 if (cksum_offset != 0 && !(mss && skb_is_gso_v6(skb))) { 2790 if (unlikely(cksum_offset > seglen)) 2791 cksum_offset -= seglen; 2792 else 2793 cksum_offset = 0; 2794 } 2795 } 2796 if (frag_idx == frag_cnt) 2797 break; 2798 2799 /* map next fragment for DMA */ 2800 idx = (count + tx->req) & tx->mask; 2801 frag = &skb_shinfo(skb)->frags[frag_idx]; 2802 frag_idx++; 2803 len = skb_frag_size(frag); 2804 bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len, 2805 DMA_TO_DEVICE); 2806 dma_unmap_addr_set(&tx->info[idx], bus, bus); 2807 dma_unmap_len_set(&tx->info[idx], len, len); 2808 } 2809 2810 (req - rdma_count)->rdma_count = rdma_count; 2811 if (mss) 2812 do { 2813 req--; 2814 req->flags |= MXGEFW_FLAGS_TSO_LAST; 2815 } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP | 2816 MXGEFW_FLAGS_FIRST))); 2817 idx = ((count - 1) + tx->req) & tx->mask; 2818 tx->info[idx].last = 1; 2819 myri10ge_submit_req(tx, tx->req_list, count); 2820 /* if using multiple tx queues, make sure NIC polls the 2821 * current slice */ 2822 if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) { 2823 tx->queue_active = 1; 2824 put_be32(htonl(1), tx->send_go); 2825 mb(); 2826 mmiowb(); 2827 } 2828 tx->pkt_start++; 2829 if ((avail - count) < MXGEFW_MAX_SEND_DESC) { 2830 tx->stop_queue++; 2831 netif_tx_stop_queue(netdev_queue); 2832 } 2833 return NETDEV_TX_OK; 2834 2835 abort_linearize: 2836 /* Free any DMA resources we've alloced and clear out the skb 2837 * slot so as to not trip up assertions, and to avoid a 2838 * double-free if linearizing fails */ 2839 2840 last_idx = (idx + 1) & tx->mask; 2841 idx = tx->req & tx->mask; 2842 tx->info[idx].skb = NULL; 2843 do { 2844 len 
= dma_unmap_len(&tx->info[idx], len); 2845 if (len) { 2846 if (tx->info[idx].skb != NULL) 2847 pci_unmap_single(mgp->pdev, 2848 dma_unmap_addr(&tx->info[idx], 2849 bus), len, 2850 PCI_DMA_TODEVICE); 2851 else 2852 pci_unmap_page(mgp->pdev, 2853 dma_unmap_addr(&tx->info[idx], 2854 bus), len, 2855 PCI_DMA_TODEVICE); 2856 dma_unmap_len_set(&tx->info[idx], len, 0); 2857 tx->info[idx].skb = NULL; 2858 } 2859 idx = (idx + 1) & tx->mask; 2860 } while (idx != last_idx); 2861 if (skb_is_gso(skb)) { 2862 netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n"); 2863 goto drop; 2864 } 2865 2866 if (skb_linearize(skb)) 2867 goto drop; 2868 2869 tx->linearized++; 2870 goto again; 2871 2872 drop: 2873 dev_kfree_skb_any(skb); 2874 ss->stats.tx_dropped += 1; 2875 return NETDEV_TX_OK; 2876 2877 } 2878 2879 static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb, 2880 struct net_device *dev) 2881 { 2882 struct sk_buff *segs, *curr; 2883 struct myri10ge_priv *mgp = netdev_priv(dev); 2884 struct myri10ge_slice_state *ss; 2885 netdev_tx_t status; 2886 2887 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); 2888 if (IS_ERR(segs)) 2889 goto drop; 2890 2891 while (segs) { 2892 curr = segs; 2893 segs = segs->next; 2894 curr->next = NULL; 2895 status = myri10ge_xmit(curr, dev); 2896 if (status != 0) { 2897 dev_kfree_skb_any(curr); 2898 if (segs != NULL) { 2899 curr = segs; 2900 segs = segs->next; 2901 curr->next = NULL; 2902 dev_kfree_skb_any(segs); 2903 } 2904 goto drop; 2905 } 2906 } 2907 dev_kfree_skb_any(skb); 2908 return NETDEV_TX_OK; 2909 2910 drop: 2911 ss = &mgp->ss[skb_get_queue_mapping(skb)]; 2912 dev_kfree_skb_any(skb); 2913 ss->stats.tx_dropped += 1; 2914 return NETDEV_TX_OK; 2915 } 2916 2917 static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev, 2918 struct rtnl_link_stats64 *stats) 2919 { 2920 const struct myri10ge_priv *mgp = netdev_priv(dev); 2921 const struct myri10ge_slice_netstats *slice_stats; 2922 int i; 2923 2924 for (i = 0; i < mgp->num_slices; i++) { 2925 slice_stats = &mgp->ss[i].stats; 2926 stats->rx_packets += slice_stats->rx_packets; 2927 stats->tx_packets += slice_stats->tx_packets; 2928 stats->rx_bytes += slice_stats->rx_bytes; 2929 stats->tx_bytes += slice_stats->tx_bytes; 2930 stats->rx_dropped += slice_stats->rx_dropped; 2931 stats->tx_dropped += slice_stats->tx_dropped; 2932 } 2933 return stats; 2934 } 2935 2936 static void myri10ge_set_multicast_list(struct net_device *dev) 2937 { 2938 struct myri10ge_priv *mgp = netdev_priv(dev); 2939 struct myri10ge_cmd cmd; 2940 struct netdev_hw_addr *ha; 2941 __be32 data[2] = { 0, 0 }; 2942 int err; 2943 2944 /* can be called from atomic contexts, 2945 * pass 1 to force atomicity in myri10ge_send_cmd() */ 2946 myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1); 2947 2948 /* This firmware is known to not support multicast */ 2949 if (!mgp->fw_multicast_support) 2950 return; 2951 2952 /* Disable multicast filtering */ 2953 2954 err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1); 2955 if (err != 0) { 2956 netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n", 2957 err); 2958 goto abort; 2959 } 2960 2961 if ((dev->flags & IFF_ALLMULTI) || mgp->adopted_rx_filter_bug) { 2962 /* request to disable multicast filtering, so quit here */ 2963 return; 2964 } 2965 2966 /* Flush the filters */ 2967 2968 err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, 2969 &cmd, 1); 2970 if (err != 0) { 2971 netdev_err(dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, error status: %d\n", 2972 err); 
2973 goto abort; 2974 } 2975 2976 /* Walk the multicast list, and add each address */ 2977 netdev_for_each_mc_addr(ha, dev) { 2978 memcpy(data, &ha->addr, 6); 2979 cmd.data0 = ntohl(data[0]); 2980 cmd.data1 = ntohl(data[1]); 2981 err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP, 2982 &cmd, 1); 2983 2984 if (err != 0) { 2985 netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n", 2986 err, ha->addr); 2987 goto abort; 2988 } 2989 } 2990 /* Enable multicast filtering */ 2991 err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1); 2992 if (err != 0) { 2993 netdev_err(dev, "Failed MXGEFW_DISABLE_ALLMULTI, error status: %d\n", 2994 err); 2995 goto abort; 2996 } 2997 2998 return; 2999 3000 abort: 3001 return; 3002 } 3003 3004 static int myri10ge_set_mac_address(struct net_device *dev, void *addr) 3005 { 3006 struct sockaddr *sa = addr; 3007 struct myri10ge_priv *mgp = netdev_priv(dev); 3008 int status; 3009 3010 if (!is_valid_ether_addr(sa->sa_data)) 3011 return -EADDRNOTAVAIL; 3012 3013 status = myri10ge_update_mac_address(mgp, sa->sa_data); 3014 if (status != 0) { 3015 netdev_err(dev, "changing mac address failed with %d\n", 3016 status); 3017 return status; 3018 } 3019 3020 /* change the dev structure */ 3021 memcpy(dev->dev_addr, sa->sa_data, 6); 3022 return 0; 3023 } 3024 3025 static int myri10ge_change_mtu(struct net_device *dev, int new_mtu) 3026 { 3027 struct myri10ge_priv *mgp = netdev_priv(dev); 3028 int error = 0; 3029 3030 if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) { 3031 netdev_err(dev, "new mtu (%d) is not valid\n", new_mtu); 3032 return -EINVAL; 3033 } 3034 netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu); 3035 if (mgp->running) { 3036 /* if we change the mtu on an active device, we must 3037 * reset the device so the firmware sees the change */ 3038 myri10ge_close(dev); 3039 dev->mtu = new_mtu; 3040 myri10ge_open(dev); 3041 } else 3042 dev->mtu = new_mtu; 3043 3044 return error; 3045 } 3046 3047 /* 3048 * Enable ECRC to align PCI-E Completion packets on an 8-byte boundary. 3049 * Only do it if the bridge is a root port since we don't want to disturb 3050 * any other device, except if forced with myri10ge_ecrc_enable > 1. 
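 *
 * Mechanically: find the AER extended capability on the (root port)
 * bridge, check PCI_ERR_CAP_ECRC_GENC (generation capable) in its
 * PCI_ERR_CAP register, and set PCI_ERR_CAP_ECRC_GENE to turn ECRC
 * generation on.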
3051 */ 3052 3053 static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp) 3054 { 3055 struct pci_dev *bridge = mgp->pdev->bus->self; 3056 struct device *dev = &mgp->pdev->dev; 3057 int cap; 3058 unsigned err_cap; 3059 int ret; 3060 3061 if (!myri10ge_ecrc_enable || !bridge) 3062 return; 3063 3064 /* check that the bridge is a root port */ 3065 if (pci_pcie_type(bridge) != PCI_EXP_TYPE_ROOT_PORT) { 3066 if (myri10ge_ecrc_enable > 1) { 3067 struct pci_dev *prev_bridge, *old_bridge = bridge; 3068 3069 /* Walk the hierarchy up to the root port 3070 * where ECRC has to be enabled */ 3071 do { 3072 prev_bridge = bridge; 3073 bridge = bridge->bus->self; 3074 if (!bridge || prev_bridge == bridge) { 3075 dev_err(dev, 3076 "Failed to find root port" 3077 " to force ECRC\n"); 3078 return; 3079 } 3080 } while (pci_pcie_type(bridge) != 3081 PCI_EXP_TYPE_ROOT_PORT); 3082 3083 dev_info(dev, 3084 "Forcing ECRC on non-root port %s" 3085 " (enabling on root port %s)\n", 3086 pci_name(old_bridge), pci_name(bridge)); 3087 } else { 3088 dev_err(dev, 3089 "Not enabling ECRC on non-root port %s\n", 3090 pci_name(bridge)); 3091 return; 3092 } 3093 } 3094 3095 cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR); 3096 if (!cap) 3097 return; 3098 3099 ret = pci_read_config_dword(bridge, cap + PCI_ERR_CAP, &err_cap); 3100 if (ret) { 3101 dev_err(dev, "failed reading ext-conf-space of %s\n", 3102 pci_name(bridge)); 3103 dev_err(dev, "\t pci=nommconf in use? " 3104 "or buggy/incomplete/absent ACPI MCFG attr?\n"); 3105 return; 3106 } 3107 if (!(err_cap & PCI_ERR_CAP_ECRC_GENC)) 3108 return; 3109 3110 err_cap |= PCI_ERR_CAP_ECRC_GENE; 3111 pci_write_config_dword(bridge, cap + PCI_ERR_CAP, err_cap); 3112 dev_info(dev, "Enabled ECRC on upstream bridge %s\n", pci_name(bridge)); 3113 } 3114 3115 /* 3116 * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput 3117 * when the PCI-E Completion packets are aligned on an 8-byte 3118 * boundary. Some PCI-E chip sets always align Completion packets; on 3119 * the ones that do not, the alignment can be enforced by enabling 3120 * ECRC generation (if supported). 3121 * 3122 * When PCI-E Completion packets are not aligned, it is actually more 3123 * efficient to limit Read-DMA transactions to 2KB, rather than 4KB. 3124 * 3125 * If the driver can neither enable ECRC nor verify that it has 3126 * already been enabled, then it must use a firmware image which works 3127 * around unaligned completion packets (myri10ge_rss_ethp_z8e.dat), and it 3128 * should also ensure that it never gives the device a Read-DMA which is 3129 * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is 3130 * enabled, then the driver should use the aligned (myri10ge_rss_eth_z8e.dat) 3131 * firmware image, and set tx_boundary to 4KB. 3132 */ 3133 3134 static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) 3135 { 3136 struct pci_dev *pdev = mgp->pdev; 3137 struct device *dev = &pdev->dev; 3138 int status; 3139 3140 mgp->tx_boundary = 4096; 3141 /* 3142 * Verify the max read request size was set to 4KB 3143 * before trying the test with 4KB. 3144 */ 3145 status = pcie_get_readrq(pdev); 3146 if (status < 0) { 3147 dev_err(dev, "Couldn't read max read req size: %d\n", status); 3148 goto abort; 3149 } 3150 if (status != 4096) { 3151 dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status); 3152 mgp->tx_boundary = 2048; 3153 } 3154 /* 3155 * load the optimized firmware (which assumes aligned PCIe 3156 * completions) in order to see if it works on this host. 
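 * If the MXGEFW_CMD_UNALIGNED_TEST DMA test below then catches an
 * unaligned completion (the test aborts with -E2BIG on the first
 * one seen), we fall back to the unaligned firmware and a 2KB
 * tx_boundary.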
3157 */ 3158 set_fw_name(mgp, myri10ge_fw_aligned, false); 3159 status = myri10ge_load_firmware(mgp, 1); 3160 if (status != 0) { 3161 goto abort; 3162 } 3163 3164 /* 3165 * Enable ECRC if possible 3166 */ 3167 myri10ge_enable_ecrc(mgp); 3168 3169 /* 3170 * Run a DMA test which watches for unaligned completions and 3171 * aborts on the first one seen. 3172 */ 3173 3174 status = myri10ge_dma_test(mgp, MXGEFW_CMD_UNALIGNED_TEST); 3175 if (status == 0) 3176 return; /* keep the aligned firmware */ 3177 3178 if (status != -E2BIG) 3179 dev_warn(dev, "DMA test failed: %d\n", status); 3180 if (status == -ENOSYS) 3181 dev_warn(dev, "Falling back to ethp! " 3182 "Please install up to date fw\n"); 3183 abort: 3184 /* fall back to using the unaligned firmware */ 3185 mgp->tx_boundary = 2048; 3186 set_fw_name(mgp, myri10ge_fw_unaligned, false); 3187 } 3188 3189 static void myri10ge_select_firmware(struct myri10ge_priv *mgp) 3190 { 3191 int overridden = 0; 3192 3193 if (myri10ge_force_firmware == 0) { 3194 int link_width; 3195 u16 lnk; 3196 3197 pcie_capability_read_word(mgp->pdev, PCI_EXP_LNKSTA, &lnk); 3198 link_width = (lnk >> 4) & 0x3f; 3199 3200 /* Check to see if Link is less than 8 or if the 3201 * upstream bridge is known to provide aligned 3202 * completions */ 3203 if (link_width < 8) { 3204 dev_info(&mgp->pdev->dev, "PCIE x%d Link\n", 3205 link_width); 3206 mgp->tx_boundary = 4096; 3207 set_fw_name(mgp, myri10ge_fw_aligned, false); 3208 } else { 3209 myri10ge_firmware_probe(mgp); 3210 } 3211 } else { 3212 if (myri10ge_force_firmware == 1) { 3213 dev_info(&mgp->pdev->dev, 3214 "Assuming aligned completions (forced)\n"); 3215 mgp->tx_boundary = 4096; 3216 set_fw_name(mgp, myri10ge_fw_aligned, false); 3217 } else { 3218 dev_info(&mgp->pdev->dev, 3219 "Assuming unaligned completions (forced)\n"); 3220 mgp->tx_boundary = 2048; 3221 set_fw_name(mgp, myri10ge_fw_unaligned, false); 3222 } 3223 } 3224 3225 kparam_block_sysfs_write(myri10ge_fw_name); 3226 if (myri10ge_fw_name != NULL) { 3227 char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL); 3228 if (fw_name) { 3229 overridden = 1; 3230 set_fw_name(mgp, fw_name, true); 3231 } 3232 } 3233 kparam_unblock_sysfs_write(myri10ge_fw_name); 3234 3235 if (mgp->board_number < MYRI10GE_MAX_BOARDS && 3236 myri10ge_fw_names[mgp->board_number] != NULL && 3237 strlen(myri10ge_fw_names[mgp->board_number])) { 3238 set_fw_name(mgp, myri10ge_fw_names[mgp->board_number], false); 3239 overridden = 1; 3240 } 3241 if (overridden) 3242 dev_info(&mgp->pdev->dev, "overriding firmware to %s\n", 3243 mgp->fw_name); 3244 } 3245 3246 static void myri10ge_mask_surprise_down(struct pci_dev *pdev) 3247 { 3248 struct pci_dev *bridge = pdev->bus->self; 3249 int cap; 3250 u32 mask; 3251 3252 if (bridge == NULL) 3253 return; 3254 3255 cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR); 3256 if (cap) { 3257 /* a sram parity error can cause a surprise link 3258 * down; since we expect and can recover from sram 3259 * parity errors, mask surprise link down events */ 3260 pci_read_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, &mask); 3261 mask |= 0x20; 3262 pci_write_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, mask); 3263 } 3264 } 3265 3266 #ifdef CONFIG_PM 3267 static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state) 3268 { 3269 struct myri10ge_priv *mgp; 3270 struct net_device *netdev; 3271 3272 mgp = pci_get_drvdata(pdev); 3273 if (mgp == NULL) 3274 return -EINVAL; 3275 netdev = mgp->dev; 3276 3277 netif_device_detach(netdev); 3278 if (netif_running(netdev)) { 
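/* myri10ge_close() is an ndo_stop method and normally runs under the RTNL, so take it here just as the netdev core would */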
3279 netdev_info(netdev, "closing\n"); 3280 rtnl_lock(); 3281 myri10ge_close(netdev); 3282 rtnl_unlock(); 3283 } 3284 myri10ge_dummy_rdma(mgp, 0); 3285 pci_save_state(pdev); 3286 pci_disable_device(pdev); 3287 3288 return pci_set_power_state(pdev, pci_choose_state(pdev, state)); 3289 } 3290 3291 static int myri10ge_resume(struct pci_dev *pdev) 3292 { 3293 struct myri10ge_priv *mgp; 3294 struct net_device *netdev; 3295 int status; 3296 u16 vendor; 3297 3298 mgp = pci_get_drvdata(pdev); 3299 if (mgp == NULL) 3300 return -EINVAL; 3301 netdev = mgp->dev; 3302 pci_set_power_state(pdev, PCI_D0); /* zeros conf space as a side effect */ 3303 msleep(5); /* give card time to respond */ 3304 pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor); 3305 if (vendor == 0xffff) { 3306 netdev_err(mgp->dev, "device disappeared!\n"); 3307 return -EIO; 3308 } 3309 3310 pci_restore_state(pdev); 3311 3312 status = pci_enable_device(pdev); 3313 if (status) { 3314 dev_err(&pdev->dev, "failed to enable device\n"); 3315 return status; 3316 } 3317 3318 pci_set_master(pdev); 3319 3320 myri10ge_reset(mgp); 3321 myri10ge_dummy_rdma(mgp, 1); 3322 3323 /* Save configuration space to be restored if the 3324 * nic resets due to a parity error */ 3325 pci_save_state(pdev); 3326 3327 if (netif_running(netdev)) { 3328 rtnl_lock(); 3329 status = myri10ge_open(netdev); 3330 rtnl_unlock(); 3331 if (status != 0) 3332 goto abort_with_enabled; 3333 3334 } 3335 netif_device_attach(netdev); 3336 3337 return 0; 3338 3339 abort_with_enabled: 3340 pci_disable_device(pdev); 3341 return -EIO; 3342 3343 } 3344 #endif /* CONFIG_PM */ 3345 3346 static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp) 3347 { 3348 struct pci_dev *pdev = mgp->pdev; 3349 int vs = mgp->vendor_specific_offset; 3350 u32 reboot; 3351 3352 /*enter read32 mode */ 3353 pci_write_config_byte(pdev, vs + 0x10, 0x3); 3354 3355 /*read REBOOT_STATUS (0xfffffff0) */ 3356 pci_write_config_dword(pdev, vs + 0x18, 0xfffffff0); 3357 pci_read_config_dword(pdev, vs + 0x14, &reboot); 3358 return reboot; 3359 } 3360 3361 static void 3362 myri10ge_check_slice(struct myri10ge_slice_state *ss, int *reset_needed, 3363 int *busy_slice_cnt, u32 rx_pause_cnt) 3364 { 3365 struct myri10ge_priv *mgp = ss->mgp; 3366 int slice = ss - mgp->ss; 3367 3368 if (ss->tx.req != ss->tx.done && 3369 ss->tx.done == ss->watchdog_tx_done && 3370 ss->watchdog_tx_req != ss->watchdog_tx_done) { 3371 /* nic seems like it might be stuck.. */ 3372 if (rx_pause_cnt != mgp->watchdog_pause) { 3373 if (net_ratelimit()) 3374 netdev_warn(mgp->dev, "slice %d: TX paused, " 3375 "check link partner\n", slice); 3376 } else { 3377 netdev_warn(mgp->dev, 3378 "slice %d: TX stuck %d %d %d %d %d %d\n", 3379 slice, ss->tx.queue_active, ss->tx.req, 3380 ss->tx.done, ss->tx.pkt_start, 3381 ss->tx.pkt_done, 3382 (int)ntohl(mgp->ss[slice].fw_stats-> 3383 send_done_count)); 3384 *reset_needed = 1; 3385 ss->stuck = 1; 3386 } 3387 } 3388 if (ss->watchdog_tx_done != ss->tx.done || 3389 ss->watchdog_rx_done != ss->rx_done.cnt) { 3390 *busy_slice_cnt += 1; 3391 } 3392 ss->watchdog_tx_done = ss->tx.done; 3393 ss->watchdog_tx_req = ss->tx.req; 3394 ss->watchdog_rx_done = ss->rx_done.cnt; 3395 } 3396 3397 /* 3398 * This watchdog is used to check whether the board has suffered 3399 * from a parity error and needs to be recovered. 
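 * It runs from a workqueue (scheduled by myri10ge_watchdog_timer)
 * rather than in timer context because recovery takes the RTNL and
 * closes/re-opens the device, which may sleep.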
3400 */ 3401 static void myri10ge_watchdog(struct work_struct *work) 3402 { 3403 struct myri10ge_priv *mgp = 3404 container_of(work, struct myri10ge_priv, watchdog_work); 3405 struct myri10ge_slice_state *ss; 3406 u32 reboot, rx_pause_cnt; 3407 int status, rebooted; 3408 int i; 3409 int reset_needed = 0; 3410 int busy_slice_cnt = 0; 3411 u16 cmd, vendor; 3412 3413 mgp->watchdog_resets++; 3414 pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd); 3415 rebooted = 0; 3416 if ((cmd & PCI_COMMAND_MASTER) == 0) { 3417 /* Bus master DMA disabled? Check to see 3418 * if the card rebooted due to a parity error 3419 * For now, just report it */ 3420 reboot = myri10ge_read_reboot(mgp); 3421 netdev_err(mgp->dev, "NIC rebooted (0x%x),%s resetting\n", 3422 reboot, myri10ge_reset_recover ? "" : " not"); 3423 if (myri10ge_reset_recover == 0) 3424 return; 3425 rtnl_lock(); 3426 mgp->rebooted = 1; 3427 rebooted = 1; 3428 myri10ge_close(mgp->dev); 3429 myri10ge_reset_recover--; 3430 mgp->rebooted = 0; 3431 /* 3432 * A rebooted nic will come back with config space as 3433 * it was after power was applied to PCIe bus. 3434 * Attempt to restore config space which was saved 3435 * when the driver was loaded, or the last time the 3436 * nic was resumed from power saving mode. 3437 */ 3438 pci_restore_state(mgp->pdev); 3439 3440 /* save state again for accounting reasons */ 3441 pci_save_state(mgp->pdev); 3442 3443 } else { 3444 /* if we get back -1's from our slot, perhaps somebody 3445 * powered off our card. Don't try to reset it in 3446 * this case */ 3447 if (cmd == 0xffff) { 3448 pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor); 3449 if (vendor == 0xffff) { 3450 netdev_err(mgp->dev, "device disappeared!\n"); 3451 return; 3452 } 3453 } 3454 /* Perhaps it is a software error. See if stuck slice 3455 * has recovered, reset if not */ 3456 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause); 3457 for (i = 0; i < mgp->num_slices; i++) { 3458 ss = &mgp->ss[i]; 3459 if (ss->stuck) { 3460 myri10ge_check_slice(ss, &reset_needed, 3461 &busy_slice_cnt, 3462 rx_pause_cnt); 3463 ss->stuck = 0; 3464 } 3465 } 3466 if (!reset_needed) { 3467 netdev_dbg(mgp->dev, "not resetting\n"); 3468 return; 3469 } 3470 3471 netdev_err(mgp->dev, "device timeout, resetting\n"); 3472 } 3473 3474 if (!rebooted) { 3475 rtnl_lock(); 3476 myri10ge_close(mgp->dev); 3477 } 3478 status = myri10ge_load_firmware(mgp, 1); 3479 if (status != 0) 3480 netdev_err(mgp->dev, "failed to load firmware\n"); 3481 else 3482 myri10ge_open(mgp->dev); 3483 rtnl_unlock(); 3484 } 3485 3486 /* 3487 * We use our own timer routine rather than relying upon 3488 * netdev->tx_timeout because we have a very large hardware transmit 3489 * queue. Due to the large queue, the netdev->tx_timeout function 3490 * cannot detect a NIC with a parity error in a timely fashion if the 3491 * NIC is lightly loaded.
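 *
 * Besides spotting stuck slices, the timer below also refills any
 * receive ring that has run short of pages (watchdog_needed) and,
 * when the NIC has been completely idle, polls PCI_COMMAND so a
 * parity-induced reboot is still noticed promptly.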
3492 */ 3493 static void myri10ge_watchdog_timer(unsigned long arg) 3494 { 3495 struct myri10ge_priv *mgp; 3496 struct myri10ge_slice_state *ss; 3497 int i, reset_needed, busy_slice_cnt; 3498 u32 rx_pause_cnt; 3499 u16 cmd; 3500 3501 mgp = (struct myri10ge_priv *)arg; 3502 3503 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause); 3504 busy_slice_cnt = 0; 3505 for (i = 0, reset_needed = 0; 3506 i < mgp->num_slices && reset_needed == 0; ++i) { 3507 3508 ss = &mgp->ss[i]; 3509 if (ss->rx_small.watchdog_needed) { 3510 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, 3511 mgp->small_bytes + MXGEFW_PAD, 3512 1); 3513 if (ss->rx_small.fill_cnt - ss->rx_small.cnt >= 3514 myri10ge_fill_thresh) 3515 ss->rx_small.watchdog_needed = 0; 3516 } 3517 if (ss->rx_big.watchdog_needed) { 3518 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, 3519 mgp->big_bytes, 1); 3520 if (ss->rx_big.fill_cnt - ss->rx_big.cnt >= 3521 myri10ge_fill_thresh) 3522 ss->rx_big.watchdog_needed = 0; 3523 } 3524 myri10ge_check_slice(ss, &reset_needed, &busy_slice_cnt, 3525 rx_pause_cnt); 3526 } 3527 /* if we've sent or received no traffic, poll the NIC to 3528 * ensure it is still there. Otherwise, we risk not noticing 3529 * an error in a timely fashion */ 3530 if (busy_slice_cnt == 0) { 3531 pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd); 3532 if ((cmd & PCI_COMMAND_MASTER) == 0) { 3533 reset_needed = 1; 3534 } 3535 } 3536 mgp->watchdog_pause = rx_pause_cnt; 3537 3538 if (reset_needed) { 3539 schedule_work(&mgp->watchdog_work); 3540 } else { 3541 /* rearm timer */ 3542 mod_timer(&mgp->watchdog_timer, 3543 jiffies + myri10ge_watchdog_timeout * HZ); 3544 } 3545 } 3546 3547 static void myri10ge_free_slices(struct myri10ge_priv *mgp) 3548 { 3549 struct myri10ge_slice_state *ss; 3550 struct pci_dev *pdev = mgp->pdev; 3551 size_t bytes; 3552 int i; 3553 3554 if (mgp->ss == NULL) 3555 return; 3556 3557 for (i = 0; i < mgp->num_slices; i++) { 3558 ss = &mgp->ss[i]; 3559 if (ss->rx_done.entry != NULL) { 3560 bytes = mgp->max_intr_slots * 3561 sizeof(*ss->rx_done.entry); 3562 dma_free_coherent(&pdev->dev, bytes, 3563 ss->rx_done.entry, ss->rx_done.bus); 3564 ss->rx_done.entry = NULL; 3565 } 3566 if (ss->fw_stats != NULL) { 3567 bytes = sizeof(*ss->fw_stats); 3568 dma_free_coherent(&pdev->dev, bytes, 3569 ss->fw_stats, ss->fw_stats_bus); 3570 ss->fw_stats = NULL; 3571 } 3572 netif_napi_del(&ss->napi); 3573 } 3574 kfree(mgp->ss); 3575 mgp->ss = NULL; 3576 } 3577 3578 static int myri10ge_alloc_slices(struct myri10ge_priv *mgp) 3579 { 3580 struct myri10ge_slice_state *ss; 3581 struct pci_dev *pdev = mgp->pdev; 3582 size_t bytes; 3583 int i; 3584 3585 bytes = sizeof(*mgp->ss) * mgp->num_slices; 3586 mgp->ss = kzalloc(bytes, GFP_KERNEL); 3587 if (mgp->ss == NULL) { 3588 return -ENOMEM; 3589 } 3590 3591 for (i = 0; i < mgp->num_slices; i++) { 3592 ss = &mgp->ss[i]; 3593 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); 3594 ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, 3595 &ss->rx_done.bus, 3596 GFP_KERNEL | __GFP_ZERO); 3597 if (ss->rx_done.entry == NULL) 3598 goto abort; 3599 bytes = sizeof(*ss->fw_stats); 3600 ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes, 3601 &ss->fw_stats_bus, 3602 GFP_KERNEL); 3603 if (ss->fw_stats == NULL) 3604 goto abort; 3605 ss->mgp = mgp; 3606 ss->dev = mgp->dev; 3607 netif_napi_add(ss->dev, &ss->napi, myri10ge_poll, 3608 myri10ge_napi_weight); 3609 } 3610 return 0; 3611 abort: 3612 myri10ge_free_slices(mgp); 3613 return -ENOMEM; 3614 } 3615 3616 /* 3617 * This function determines the number of 
slices supported. 3618 * The number of slices is the minimum of the number of CPUs, 3619 * the number of MSI-X irqs supported, and the number of slices 3620 * supported by the firmware. 3621 */ 3622 static void myri10ge_probe_slices(struct myri10ge_priv *mgp) 3623 { 3624 struct myri10ge_cmd cmd; 3625 struct pci_dev *pdev = mgp->pdev; 3626 char *old_fw; 3627 bool old_allocated; 3628 int i, status, ncpus, msix_cap; 3629 3630 mgp->num_slices = 1; 3631 msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 3632 ncpus = netif_get_num_default_rss_queues(); 3633 3634 if (myri10ge_max_slices == 1 || msix_cap == 0 || 3635 (myri10ge_max_slices == -1 && ncpus < 2)) 3636 return; 3637 3638 /* try to load the slice aware rss firmware */ 3639 old_fw = mgp->fw_name; 3640 old_allocated = mgp->fw_name_allocated; 3641 /* don't free old_fw if we override it. */ 3642 mgp->fw_name_allocated = false; 3643 3644 if (myri10ge_fw_name != NULL) { 3645 dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n", 3646 myri10ge_fw_name); 3647 set_fw_name(mgp, myri10ge_fw_name, false); 3648 } else if (old_fw == myri10ge_fw_aligned) 3649 set_fw_name(mgp, myri10ge_fw_rss_aligned, false); 3650 else 3651 set_fw_name(mgp, myri10ge_fw_rss_unaligned, false); 3652 status = myri10ge_load_firmware(mgp, 0); 3653 if (status != 0) { 3654 dev_info(&pdev->dev, "Rss firmware not found\n"); 3655 if (old_allocated) 3656 kfree(old_fw); 3657 return; 3658 } 3659 3660 /* hit the board with a reset to ensure it is alive */ 3661 memset(&cmd, 0, sizeof(cmd)); 3662 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0); 3663 if (status != 0) { 3664 dev_err(&mgp->pdev->dev, "failed reset\n"); 3665 goto abort_with_fw; 3666 } 3667 3668 mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot); 3669 3670 /* tell it the size of the interrupt queues */ 3671 cmd.data0 = mgp->max_intr_slots * sizeof(struct mcp_slot); 3672 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0); 3673 if (status != 0) { 3674 dev_err(&mgp->pdev->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n"); 3675 goto abort_with_fw; 3676 } 3677 3678 /* ask the maximum number of slices it supports */ 3679 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd, 0); 3680 if (status != 0) 3681 goto abort_with_fw; 3682 else 3683 mgp->num_slices = cmd.data0; 3684 3685 /* Only allow multiple slices if MSI-X is usable */ 3686 if (!myri10ge_msi) { 3687 goto abort_with_fw; 3688 } 3689 3690 /* if the admin did not specify a limit to how many 3691 * slices we should use, cap it automatically to the 3692 * number of CPUs currently online */ 3693 if (myri10ge_max_slices == -1) 3694 myri10ge_max_slices = ncpus; 3695 3696 if (mgp->num_slices > myri10ge_max_slices) 3697 mgp->num_slices = myri10ge_max_slices; 3698 3699 /* Now try to allocate as many MSI-X vectors as we have 3700 * slices. We give up on MSI-X if we can only get a single 3701 * vector.
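 * The slice count is also trimmed down to a power of two (the loop
 * below insists on it), and every failure path falls back to a
 * single slice with the original firmware restored.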
	mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
				    GFP_KERNEL);
	if (mgp->msix_vectors == NULL)
		goto disable_msix;
	for (i = 0; i < mgp->num_slices; i++)
		mgp->msix_vectors[i].entry = i;

	while (mgp->num_slices > 1) {
		/* make sure it is a power of two */
		while (!is_power_of_2(mgp->num_slices))
			mgp->num_slices--;
		if (mgp->num_slices == 1)
			goto disable_msix;
		status = pci_enable_msix(pdev, mgp->msix_vectors,
					 mgp->num_slices);
		if (status == 0) {
			pci_disable_msix(pdev);
			if (old_allocated)
				kfree(old_fw);
			return;
		}
		if (status > 0)
			mgp->num_slices = status;
		else
			goto disable_msix;
	}

disable_msix:
	if (mgp->msix_vectors != NULL) {
		kfree(mgp->msix_vectors);
		mgp->msix_vectors = NULL;
	}

abort_with_fw:
	mgp->num_slices = 1;
	set_fw_name(mgp, old_fw, old_allocated);
	myri10ge_load_firmware(mgp, 0);
}

static const struct net_device_ops myri10ge_netdev_ops = {
	.ndo_open		= myri10ge_open,
	.ndo_stop		= myri10ge_close,
	.ndo_start_xmit		= myri10ge_xmit,
	.ndo_get_stats64	= myri10ge_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= myri10ge_change_mtu,
	.ndo_set_rx_mode	= myri10ge_set_multicast_list,
	.ndo_set_mac_address	= myri10ge_set_mac_address,
};

static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct myri10ge_priv *mgp;
	struct device *dev = &pdev->dev;
	int i;
	int status = -ENXIO;
	int dac_enabled;
	unsigned hdr_offset, ss_offset;
	static int board_number;

	netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES);
	if (netdev == NULL)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	mgp = netdev_priv(netdev);
	mgp->dev = netdev;
	mgp->pdev = pdev;
	mgp->pause = myri10ge_flow_control;
	mgp->intr_coal_delay = myri10ge_intr_coal_delay;
	mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
	mgp->board_number = board_number;
	init_waitqueue_head(&mgp->down_wq);

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "pci_enable_device call failed\n");
		status = -ENODEV;
		goto abort_with_netdev;
	}

	/* Find the vendor-specific cap so we can check
	 * the reboot register later on */
	mgp->vendor_specific_offset
	    = pci_find_capability(pdev, PCI_CAP_ID_VNDR);

	/* Set our max read request to 4KB */
	status = pcie_set_readrq(pdev, 4096);
	if (status != 0) {
		dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n",
			status);
		goto abort_with_enabled;
	}

	myri10ge_mask_surprise_down(pdev);
	pci_set_master(pdev);
	dac_enabled = 1;
	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (status != 0) {
		dac_enabled = 0;
		dev_err(&pdev->dev,
			"64-bit pci address mask was refused, "
			"trying 32-bit\n");
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	}
	if (status != 0) {
		dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
		goto abort_with_enabled;
	}
	(void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
				      &mgp->cmd_bus, GFP_KERNEL);
	if (mgp->cmd == NULL)
		goto abort_with_enabled;

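	/*
	 * Map the board's BAR 0 aperture write-combined: where
	 * CONFIG_MTRR is available an MTRR covering the span is added,
	 * and ioremap_wc() requests a WC mapping either way.  Write
	 * combining matters because transmit requests are copied into
	 * NIC SRAM through this mapping, and bursting them is far
	 * cheaper than issuing individual uncached writes.
	 */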
	mgp->board_span = pci_resource_len(pdev, 0);
	mgp->iomem_base = pci_resource_start(pdev, 0);
	mgp->mtrr = -1;
	mgp->wc_enabled = 0;
#ifdef CONFIG_MTRR
	mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span,
			     MTRR_TYPE_WRCOMB, 1);
	if (mgp->mtrr >= 0)
		mgp->wc_enabled = 1;
#endif
	mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
	if (mgp->sram == NULL) {
		dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
			mgp->board_span, mgp->iomem_base);
		status = -ENXIO;
		goto abort_with_mtrr;
	}
	hdr_offset =
	    swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc;
	ss_offset = hdr_offset + offsetof(struct mcp_gen_header, string_specs);
	mgp->sram_size = swab32(readl(mgp->sram + ss_offset));
	if (mgp->sram_size > mgp->board_span ||
	    mgp->sram_size <= MYRI10GE_FW_OFFSET) {
		dev_err(&pdev->dev,
			"invalid sram_size %dB or board span %ldB\n",
			mgp->sram_size, mgp->board_span);
		goto abort_with_ioremap;
	}
	memcpy_fromio(mgp->eeprom_strings,
		      mgp->sram + mgp->sram_size, MYRI10GE_EEPROM_STRINGS_SIZE);
	memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2);
	status = myri10ge_read_mac_addr(mgp);
	if (status)
		goto abort_with_ioremap;

	for (i = 0; i < ETH_ALEN; i++)
		netdev->dev_addr[i] = mgp->mac_addr[i];

	myri10ge_select_firmware(mgp);

	status = myri10ge_load_firmware(mgp, 1);
	if (status != 0) {
		dev_err(&pdev->dev, "failed to load firmware\n");
		goto abort_with_ioremap;
	}
	myri10ge_probe_slices(mgp);
	status = myri10ge_alloc_slices(mgp);
	if (status != 0) {
		dev_err(&pdev->dev, "failed to alloc slice state\n");
		goto abort_with_firmware;
	}
	netif_set_real_num_tx_queues(netdev, mgp->num_slices);
	netif_set_real_num_rx_queues(netdev, mgp->num_slices);
	status = myri10ge_reset(mgp);
	if (status != 0) {
		dev_err(&pdev->dev, "failed reset\n");
		goto abort_with_slices;
	}
#ifdef CONFIG_MYRI10GE_DCA
	myri10ge_setup_dca(mgp);
#endif
	pci_set_drvdata(pdev, mgp);
	if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU)
		myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
	if ((myri10ge_initial_mtu + ETH_HLEN) < 68)
		myri10ge_initial_mtu = 68;

	netdev->netdev_ops = &myri10ge_netdev_ops;
	netdev->mtu = myri10ge_initial_mtu;
	netdev->hw_features = mgp->features | NETIF_F_RXCSUM;

	/* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;

	netdev->features = netdev->hw_features;

	if (dac_enabled)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= mgp->features;
	if (mgp->fw_ver_tiny < 37)
		netdev->vlan_features &= ~NETIF_F_TSO6;
	if (mgp->fw_ver_tiny < 32)
		netdev->vlan_features &= ~NETIF_F_TSO;
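
	/*
	 * The fw_ver_tiny checks above keep TSO6 and TSO out of
	 * vlan_features on firmware revisions that predate support
	 * for segmenting VLAN-tagged frames; both offloads remain
	 * available in the base feature set either way.
	 */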
	/* make sure we can get an irq, and that MSI can be
	 * setup (if available). */
	status = myri10ge_request_irq(mgp);
	if (status != 0)
		goto abort_with_firmware;
	myri10ge_free_irq(mgp);

	/* Save configuration space to be restored if the
	 * nic resets due to a parity error */
	pci_save_state(pdev);

	/* Setup the watchdog timer */
	setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
		    (unsigned long)mgp);

	SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
	INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
	status = register_netdev(netdev);
	if (status != 0) {
		dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
		goto abort_with_state;
	}
	if (mgp->msix_enabled)
		dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n",
			 mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
			 (mgp->wc_enabled ? "Enabled" : "Disabled"));
	else
		dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
			 mgp->msi_enabled ? "MSI" : "xPIC",
			 pdev->irq, mgp->tx_boundary, mgp->fw_name,
			 (mgp->wc_enabled ? "Enabled" : "Disabled"));

	board_number++;
	return 0;

abort_with_state:
	pci_restore_state(pdev);

abort_with_slices:
	myri10ge_free_slices(mgp);

abort_with_firmware:
	myri10ge_dummy_rdma(mgp, 0);

abort_with_ioremap:
	if (mgp->mac_addr_string != NULL)
		dev_err(&pdev->dev,
			"myri10ge_probe() failed: MAC=%s, SN=%ld\n",
			mgp->mac_addr_string, mgp->serial_number);
	iounmap(mgp->sram);

abort_with_mtrr:
#ifdef CONFIG_MTRR
	if (mgp->mtrr >= 0)
		mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
#endif
	dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
			  mgp->cmd, mgp->cmd_bus);

abort_with_enabled:
	pci_disable_device(pdev);

abort_with_netdev:
	set_fw_name(mgp, NULL, false);
	free_netdev(netdev);
	return status;
}
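
/*
 * The abort_with_* labels in myri10ge_probe() unwind in the reverse
 * order of acquisition and fall through from one to the next, so a
 * failure at any stage releases exactly what had been set up by that
 * point and nothing more.
 */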
/*
 * myri10ge_remove
 *
 * Does what is necessary to shut down one Myrinet device.  Called
 * once for each Myrinet card by the kernel when a module is
 * unloaded.
 */
static void myri10ge_remove(struct pci_dev *pdev)
{
	struct myri10ge_priv *mgp;
	struct net_device *netdev;

	mgp = pci_get_drvdata(pdev);
	if (mgp == NULL)
		return;

	cancel_work_sync(&mgp->watchdog_work);
	netdev = mgp->dev;
	unregister_netdev(netdev);

#ifdef CONFIG_MYRI10GE_DCA
	myri10ge_teardown_dca(mgp);
#endif
	myri10ge_dummy_rdma(mgp, 0);

	/* avoid a memory leak */
	pci_restore_state(pdev);

	iounmap(mgp->sram);

#ifdef CONFIG_MTRR
	if (mgp->mtrr >= 0)
		mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
#endif
	myri10ge_free_slices(mgp);
	kfree(mgp->msix_vectors);	/* kfree(NULL) is a no-op */
	dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
			  mgp->cmd, mgp->cmd_bus);

	set_fw_name(mgp, NULL, false);
	free_netdev(netdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E	0x0008
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9	0x0009

static DEFINE_PCI_DEVICE_TABLE(myri10ge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
	{PCI_DEVICE
	 (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
	{0},
};

MODULE_DEVICE_TABLE(pci, myri10ge_pci_tbl);

static struct pci_driver myri10ge_driver = {
	.name = "myri10ge",
	.probe = myri10ge_probe,
	.remove = myri10ge_remove,
	.id_table = myri10ge_pci_tbl,
#ifdef CONFIG_PM
	.suspend = myri10ge_suspend,
	.resume = myri10ge_resume,
#endif
};

#ifdef CONFIG_MYRI10GE_DCA
static int
myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
{
	int err = driver_for_each_device(&myri10ge_driver.driver,
					 NULL, &event,
					 myri10ge_notify_dca_device);

	if (err)
		return NOTIFY_BAD;
	return NOTIFY_DONE;
}

static struct notifier_block myri10ge_dca_notifier = {
	.notifier_call = myri10ge_notify_dca,
	.next = NULL,
	.priority = 0,
};
#endif				/* CONFIG_MYRI10GE_DCA */

static __init int myri10ge_init_module(void)
{
	pr_info("Version %s\n", MYRI10GE_VERSION_STR);

	if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) {
		pr_err("Illegal rss hash type %d, defaulting to source port\n",
		       myri10ge_rss_hash);
		myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
	}
#ifdef CONFIG_MYRI10GE_DCA
	dca_register_notify(&myri10ge_dca_notifier);
#endif
	if (myri10ge_max_slices > MYRI10GE_MAX_SLICES)
		myri10ge_max_slices = MYRI10GE_MAX_SLICES;

	return pci_register_driver(&myri10ge_driver);
}

module_init(myri10ge_init_module);

static __exit void myri10ge_cleanup_module(void)
{
#ifdef CONFIG_MYRI10GE_DCA
	dca_unregister_notify(&myri10ge_dca_notifier);
#endif
	pci_unregister_driver(&myri10ge_driver);
}

module_exit(myri10ge_cleanup_module);
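
/*
 * Load-time tuning sketch, assuming the module_param declarations
 * for these tunables earlier in this file; the values shown are
 * illustrative only:
 *
 *   # cap the driver at 4 slices and start with a jumbo MTU
 *   modprobe myri10ge myri10ge_max_slices=4 myri10ge_initial_mtu=9000
 *
 * myri10ge_max_slices=-1 (the default) lets myri10ge_probe_slices()
 * pick a value from the online CPU count, available MSI-X vectors,
 * and firmware support.
 */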