1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2007 - 2018 Intel Corporation. */ 3 4 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 5 6 #include <linux/module.h> 7 #include <linux/types.h> 8 #include <linux/init.h> 9 #include <linux/bitops.h> 10 #include <linux/vmalloc.h> 11 #include <linux/pagemap.h> 12 #include <linux/netdevice.h> 13 #include <linux/ipv6.h> 14 #include <linux/slab.h> 15 #include <net/checksum.h> 16 #include <net/ip6_checksum.h> 17 #include <net/pkt_sched.h> 18 #include <net/pkt_cls.h> 19 #include <linux/net_tstamp.h> 20 #include <linux/mii.h> 21 #include <linux/ethtool.h> 22 #include <linux/if.h> 23 #include <linux/if_vlan.h> 24 #include <linux/pci.h> 25 #include <linux/delay.h> 26 #include <linux/interrupt.h> 27 #include <linux/ip.h> 28 #include <linux/tcp.h> 29 #include <linux/sctp.h> 30 #include <linux/if_ether.h> 31 #include <linux/aer.h> 32 #include <linux/prefetch.h> 33 #include <linux/pm_runtime.h> 34 #include <linux/etherdevice.h> 35 #ifdef CONFIG_IGB_DCA 36 #include <linux/dca.h> 37 #endif 38 #include <linux/i2c.h> 39 #include "igb.h" 40 41 #define MAJ 5 42 #define MIN 4 43 #define BUILD 0 44 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 45 __stringify(BUILD) "-k" 46 47 enum queue_mode { 48 QUEUE_MODE_STRICT_PRIORITY, 49 QUEUE_MODE_STREAM_RESERVATION, 50 }; 51 52 enum tx_queue_prio { 53 TX_QUEUE_PRIO_HIGH, 54 TX_QUEUE_PRIO_LOW, 55 }; 56 57 char igb_driver_name[] = "igb"; 58 char igb_driver_version[] = DRV_VERSION; 59 static const char igb_driver_string[] = 60 "Intel(R) Gigabit Ethernet Network Driver"; 61 static const char igb_copyright[] = 62 "Copyright (c) 2007-2014 Intel Corporation."; 63 64 static const struct e1000_info *igb_info_tbl[] = { 65 [board_82575] = &e1000_82575_info, 66 }; 67 68 static const struct pci_device_id igb_pci_tbl[] = { 69 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) }, 70 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) }, 71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) }, 72 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 }, 73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 }, 74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 }, 75 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 }, 76 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 }, 77 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 }, 78 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 }, 79 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, 80 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, 81 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, 82 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, 83 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, 84 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, 85 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 }, 86 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, 87 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, 88 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, 89 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 }, 90 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 }, 91 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 }, 92 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 }, 93 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, 94 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, 95 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, 96 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, 97 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, 98 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, 99 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 }, 100 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, 101 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, 102 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, 103 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, 104 /* required last entry */ 105 {0, } 106 }; 107 108 MODULE_DEVICE_TABLE(pci, igb_pci_tbl); 109 110 static int igb_setup_all_tx_resources(struct igb_adapter *); 111 static int igb_setup_all_rx_resources(struct igb_adapter *); 112 static void igb_free_all_tx_resources(struct igb_adapter *); 113 static void igb_free_all_rx_resources(struct igb_adapter *); 114 static void igb_setup_mrqc(struct igb_adapter *); 115 static int igb_probe(struct pci_dev *, const struct pci_device_id *); 116 static void igb_remove(struct pci_dev *pdev); 117 static int igb_sw_init(struct igb_adapter *); 118 int igb_open(struct net_device *); 119 int igb_close(struct net_device *); 120 static void igb_configure(struct igb_adapter *); 121 static void igb_configure_tx(struct igb_adapter *); 122 static void igb_configure_rx(struct igb_adapter *); 123 static void igb_clean_all_tx_rings(struct igb_adapter *); 124 static void igb_clean_all_rx_rings(struct igb_adapter *); 125 static void igb_clean_tx_ring(struct igb_ring *); 126 static void igb_clean_rx_ring(struct igb_ring *); 127 static void igb_set_rx_mode(struct net_device *); 128 static void igb_update_phy_info(struct timer_list *); 129 static void igb_watchdog(struct timer_list *); 130 static void igb_watchdog_task(struct work_struct *); 131 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *); 132 static void igb_get_stats64(struct net_device *dev, 133 struct rtnl_link_stats64 *stats); 134 static int igb_change_mtu(struct net_device *, int); 135 static int igb_set_mac(struct net_device *, void *); 136 static void igb_set_uta(struct igb_adapter *adapter, bool set); 137 static irqreturn_t igb_intr(int irq, void *); 138 static irqreturn_t igb_intr_msi(int irq, void *); 139 static irqreturn_t igb_msix_other(int irq, void *); 140 static irqreturn_t igb_msix_ring(int irq, void *); 141 #ifdef CONFIG_IGB_DCA 142 static void igb_update_dca(struct igb_q_vector *); 143 static void igb_setup_dca(struct igb_adapter *); 144 #endif /* CONFIG_IGB_DCA */ 145 static int igb_poll(struct napi_struct *, int); 146 static bool igb_clean_tx_irq(struct igb_q_vector *, int); 147 static int igb_clean_rx_irq(struct igb_q_vector *, int); 148 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); 149 static void igb_tx_timeout(struct net_device *); 150 static void igb_reset_task(struct work_struct *); 151 static void igb_vlan_mode(struct net_device *netdev, 152 netdev_features_t features); 153 static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16); 154 static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16); 155 static void igb_restore_vlan(struct igb_adapter *); 156 static void igb_rar_set_index(struct igb_adapter *, u32); 157 static void igb_ping_all_vfs(struct igb_adapter *); 158 static void igb_msg_task(struct igb_adapter *); 159 static void igb_vmm_control(struct igb_adapter *); 160 static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); 161 static void igb_flush_mac_table(struct igb_adapter *); 162 static int igb_available_rars(struct igb_adapter *, u8); 163 static void igb_set_default_mac_filter(struct igb_adapter *); 164 static int igb_uc_sync(struct net_device *, const unsigned char *); 165 static int igb_uc_unsync(struct net_device *, const unsigned char *); 166 static void igb_restore_vf_multicasts(struct igb_adapter *adapter); 167 static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); 168 static int igb_ndo_set_vf_vlan(struct net_device *netdev, 169 int vf, u16 vlan, u8 qos, __be16 vlan_proto); 170 static int igb_ndo_set_vf_bw(struct net_device *, int, int, int); 171 static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, 172 bool setting); 173 static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, 174 bool setting); 175 static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, 176 struct ifla_vf_info *ivi); 177 static void igb_check_vf_rate_limit(struct igb_adapter *); 178 static void igb_nfc_filter_exit(struct igb_adapter *adapter); 179 static void igb_nfc_filter_restore(struct igb_adapter *adapter); 180 181 #ifdef CONFIG_PCI_IOV 182 static int igb_vf_configure(struct igb_adapter *adapter, int vf); 183 static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs); 184 static int igb_disable_sriov(struct pci_dev *dev); 185 static int igb_pci_disable_sriov(struct pci_dev *dev); 186 #endif 187 188 static int igb_suspend(struct device *); 189 static int igb_resume(struct device *); 190 static int igb_runtime_suspend(struct device *dev); 191 static int igb_runtime_resume(struct device *dev); 192 static int igb_runtime_idle(struct device *dev); 193 static const struct dev_pm_ops igb_pm_ops = { 194 SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) 195 SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, 196 igb_runtime_idle) 197 }; 198 static void igb_shutdown(struct pci_dev *); 199 static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs); 200 #ifdef CONFIG_IGB_DCA 201 static int igb_notify_dca(struct notifier_block *, unsigned long, void *); 202 static struct notifier_block dca_notifier = { 203 .notifier_call = igb_notify_dca, 204 .next = NULL, 205 .priority = 0 206 }; 207 #endif 208 #ifdef CONFIG_NET_POLL_CONTROLLER 209 /* for netdump / net console */ 210 static void igb_netpoll(struct net_device *); 211 #endif 212 #ifdef CONFIG_PCI_IOV 213 static unsigned int max_vfs; 214 module_param(max_vfs, uint, 0); 215 MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function"); 216 #endif /* CONFIG_PCI_IOV */ 217 218 static pci_ers_result_t igb_io_error_detected(struct pci_dev *, 219 pci_channel_state_t); 220 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); 221 static void igb_io_resume(struct pci_dev *); 222 223 static const struct pci_error_handlers igb_err_handler = { 224 .error_detected = igb_io_error_detected, 225 .slot_reset = igb_io_slot_reset, 226 .resume = igb_io_resume, 227 }; 228 229 static void igb_init_dmac(struct igb_adapter *adapter, u32 pba); 230 231 static struct pci_driver igb_driver = { 232 .name = igb_driver_name, 233 .id_table = igb_pci_tbl, 234 .probe = igb_probe, 235 .remove = igb_remove, 236 #ifdef CONFIG_PM 237 .driver.pm = &igb_pm_ops, 238 #endif 239 .shutdown = igb_shutdown, 240 .sriov_configure = igb_pci_sriov_configure, 241 .err_handler = &igb_err_handler 242 }; 243 244 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); 245 MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); 246 MODULE_LICENSE("GPL"); 247 MODULE_VERSION(DRV_VERSION); 248 249 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) 250 static int debug = -1; 251 module_param(debug, int, 0); 252 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 253 254 struct igb_reg_info { 255 u32 ofs; 256 char *name; 257 }; 258 259 static const struct igb_reg_info igb_reg_info_tbl[] = { 260 261 /* General Registers */ 262 {E1000_CTRL, "CTRL"}, 263 {E1000_STATUS, "STATUS"}, 264 {E1000_CTRL_EXT, "CTRL_EXT"}, 265 266 /* Interrupt Registers */ 267 {E1000_ICR, "ICR"}, 268 269 /* RX Registers */ 270 {E1000_RCTL, "RCTL"}, 271 {E1000_RDLEN(0), "RDLEN"}, 272 {E1000_RDH(0), "RDH"}, 273 {E1000_RDT(0), "RDT"}, 274 {E1000_RXDCTL(0), "RXDCTL"}, 275 {E1000_RDBAL(0), "RDBAL"}, 276 {E1000_RDBAH(0), "RDBAH"}, 277 278 /* TX Registers */ 279 {E1000_TCTL, "TCTL"}, 280 {E1000_TDBAL(0), "TDBAL"}, 281 {E1000_TDBAH(0), "TDBAH"}, 282 {E1000_TDLEN(0), "TDLEN"}, 283 {E1000_TDH(0), "TDH"}, 284 {E1000_TDT(0), "TDT"}, 285 {E1000_TXDCTL(0), "TXDCTL"}, 286 {E1000_TDFH, "TDFH"}, 287 {E1000_TDFT, "TDFT"}, 288 {E1000_TDFHS, "TDFHS"}, 289 {E1000_TDFPC, "TDFPC"}, 290 291 /* List Terminator */ 292 {} 293 }; 294 295 /* igb_regdump - register printout routine */ 296 static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) 297 { 298 int n = 0; 299 char rname[16]; 300 u32 regs[8]; 301 302 switch (reginfo->ofs) { 303 case E1000_RDLEN(0): 304 for (n = 0; n < 4; n++) 305 regs[n] = rd32(E1000_RDLEN(n)); 306 break; 307 case E1000_RDH(0): 308 for (n = 0; n < 4; n++) 309 regs[n] = rd32(E1000_RDH(n)); 310 break; 311 case E1000_RDT(0): 312 for (n = 0; n < 4; n++) 313 regs[n] = rd32(E1000_RDT(n)); 314 break; 315 case E1000_RXDCTL(0): 316 for (n = 0; n < 4; n++) 317 regs[n] = rd32(E1000_RXDCTL(n)); 318 break; 319 case E1000_RDBAL(0): 320 for (n = 0; n < 4; n++) 321 regs[n] = rd32(E1000_RDBAL(n)); 322 break; 323 case E1000_RDBAH(0): 324 for (n = 0; n < 4; n++) 325 regs[n] = rd32(E1000_RDBAH(n)); 326 break; 327 case E1000_TDBAL(0): 328 for (n = 0; n < 4; n++) 329 regs[n] = rd32(E1000_RDBAL(n)); 330 break; 331 case E1000_TDBAH(0): 332 for (n = 0; n < 4; n++) 333 regs[n] = rd32(E1000_TDBAH(n)); 334 break; 335 case E1000_TDLEN(0): 336 for (n = 0; n < 4; n++) 337 regs[n] = rd32(E1000_TDLEN(n)); 338 break; 339 case E1000_TDH(0): 340 for (n = 0; n < 4; n++) 341 regs[n] = rd32(E1000_TDH(n)); 342 break; 343 case E1000_TDT(0): 344 for (n = 0; n < 4; n++) 345 regs[n] = rd32(E1000_TDT(n)); 346 break; 347 case E1000_TXDCTL(0): 348 for (n = 0; n < 4; n++) 349 regs[n] = rd32(E1000_TXDCTL(n)); 350 break; 351 default: 352 pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); 353 return; 354 } 355 356 snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); 357 pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], 358 regs[2], regs[3]); 359 } 360 361 /* igb_dump - Print registers, Tx-rings and Rx-rings */ 362 static void igb_dump(struct igb_adapter *adapter) 363 { 364 struct net_device *netdev = adapter->netdev; 365 struct e1000_hw *hw = &adapter->hw; 366 struct igb_reg_info *reginfo; 367 struct igb_ring *tx_ring; 368 union e1000_adv_tx_desc *tx_desc; 369 struct my_u0 { u64 a; u64 b; } *u0; 370 struct igb_ring *rx_ring; 371 union e1000_adv_rx_desc *rx_desc; 372 u32 staterr; 373 u16 i, n; 374 375 if (!netif_msg_hw(adapter)) 376 return; 377 378 /* Print netdevice Info */ 379 if (netdev) { 380 dev_info(&adapter->pdev->dev, "Net device Info\n"); 381 pr_info("Device Name state trans_start\n"); 382 pr_info("%-15s %016lX %016lX\n", netdev->name, 383 netdev->state, dev_trans_start(netdev)); 384 } 385 386 /* Print Registers */ 387 dev_info(&adapter->pdev->dev, "Register Dump\n"); 388 pr_info(" Register Name Value\n"); 389 for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl; 390 reginfo->name; reginfo++) { 391 igb_regdump(hw, reginfo); 392 } 393 394 /* Print TX Ring Summary */ 395 if (!netdev || !netif_running(netdev)) 396 goto exit; 397 398 dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); 399 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); 400 for (n = 0; n < adapter->num_tx_queues; n++) { 401 struct igb_tx_buffer *buffer_info; 402 tx_ring = adapter->tx_ring[n]; 403 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; 404 pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", 405 n, tx_ring->next_to_use, tx_ring->next_to_clean, 406 (u64)dma_unmap_addr(buffer_info, dma), 407 dma_unmap_len(buffer_info, len), 408 buffer_info->next_to_watch, 409 (u64)buffer_info->time_stamp); 410 } 411 412 /* Print TX Rings */ 413 if (!netif_msg_tx_done(adapter)) 414 goto rx_ring_summary; 415 416 dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); 417 418 /* Transmit Descriptor Formats 419 * 420 * Advanced Transmit Descriptor 421 * +--------------------------------------------------------------+ 422 * 0 | Buffer Address [63:0] | 423 * +--------------------------------------------------------------+ 424 * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | 425 * +--------------------------------------------------------------+ 426 * 63 46 45 40 39 38 36 35 32 31 24 15 0 427 */ 428 429 for (n = 0; n < adapter->num_tx_queues; n++) { 430 tx_ring = adapter->tx_ring[n]; 431 pr_info("------------------------------------\n"); 432 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); 433 pr_info("------------------------------------\n"); 434 pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); 435 436 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 437 const char *next_desc; 438 struct igb_tx_buffer *buffer_info; 439 tx_desc = IGB_TX_DESC(tx_ring, i); 440 buffer_info = &tx_ring->tx_buffer_info[i]; 441 u0 = (struct my_u0 *)tx_desc; 442 if (i == tx_ring->next_to_use && 443 i == tx_ring->next_to_clean) 444 next_desc = " NTC/U"; 445 else if (i == tx_ring->next_to_use) 446 next_desc = " NTU"; 447 else if (i == tx_ring->next_to_clean) 448 next_desc = " NTC"; 449 else 450 next_desc = ""; 451 452 pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", 453 i, le64_to_cpu(u0->a), 454 le64_to_cpu(u0->b), 455 (u64)dma_unmap_addr(buffer_info, dma), 456 dma_unmap_len(buffer_info, len), 457 buffer_info->next_to_watch, 458 (u64)buffer_info->time_stamp, 459 buffer_info->skb, next_desc); 460 461 if (netif_msg_pktdata(adapter) && buffer_info->skb) 462 print_hex_dump(KERN_INFO, "", 463 DUMP_PREFIX_ADDRESS, 464 16, 1, buffer_info->skb->data, 465 dma_unmap_len(buffer_info, len), 466 true); 467 } 468 } 469 470 /* Print RX Rings Summary */ 471 rx_ring_summary: 472 dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); 473 pr_info("Queue [NTU] [NTC]\n"); 474 for (n = 0; n < adapter->num_rx_queues; n++) { 475 rx_ring = adapter->rx_ring[n]; 476 pr_info(" %5d %5X %5X\n", 477 n, rx_ring->next_to_use, rx_ring->next_to_clean); 478 } 479 480 /* Print RX Rings */ 481 if (!netif_msg_rx_status(adapter)) 482 goto exit; 483 484 dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); 485 486 /* Advanced Receive Descriptor (Read) Format 487 * 63 1 0 488 * +-----------------------------------------------------+ 489 * 0 | Packet Buffer Address [63:1] |A0/NSE| 490 * +----------------------------------------------+------+ 491 * 8 | Header Buffer Address [63:1] | DD | 492 * +-----------------------------------------------------+ 493 * 494 * 495 * Advanced Receive Descriptor (Write-Back) Format 496 * 497 * 63 48 47 32 31 30 21 20 17 16 4 3 0 498 * +------------------------------------------------------+ 499 * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | 500 * | Checksum Ident | | | | Type | Type | 501 * +------------------------------------------------------+ 502 * 8 | VLAN Tag | Length | Extended Error | Extended Status | 503 * +------------------------------------------------------+ 504 * 63 48 47 32 31 20 19 0 505 */ 506 507 for (n = 0; n < adapter->num_rx_queues; n++) { 508 rx_ring = adapter->rx_ring[n]; 509 pr_info("------------------------------------\n"); 510 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); 511 pr_info("------------------------------------\n"); 512 pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); 513 pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); 514 515 for (i = 0; i < rx_ring->count; i++) { 516 const char *next_desc; 517 struct igb_rx_buffer *buffer_info; 518 buffer_info = &rx_ring->rx_buffer_info[i]; 519 rx_desc = IGB_RX_DESC(rx_ring, i); 520 u0 = (struct my_u0 *)rx_desc; 521 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 522 523 if (i == rx_ring->next_to_use) 524 next_desc = " NTU"; 525 else if (i == rx_ring->next_to_clean) 526 next_desc = " NTC"; 527 else 528 next_desc = ""; 529 530 if (staterr & E1000_RXD_STAT_DD) { 531 /* Descriptor Done */ 532 pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n", 533 "RWB", i, 534 le64_to_cpu(u0->a), 535 le64_to_cpu(u0->b), 536 next_desc); 537 } else { 538 pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n", 539 "R ", i, 540 le64_to_cpu(u0->a), 541 le64_to_cpu(u0->b), 542 (u64)buffer_info->dma, 543 next_desc); 544 545 if (netif_msg_pktdata(adapter) && 546 buffer_info->dma && buffer_info->page) { 547 print_hex_dump(KERN_INFO, "", 548 DUMP_PREFIX_ADDRESS, 549 16, 1, 550 page_address(buffer_info->page) + 551 buffer_info->page_offset, 552 igb_rx_bufsz(rx_ring), true); 553 } 554 } 555 } 556 } 557 558 exit: 559 return; 560 } 561 562 /** 563 * igb_get_i2c_data - Reads the I2C SDA data bit 564 * @hw: pointer to hardware structure 565 * @i2cctl: Current value of I2CCTL register 566 * 567 * Returns the I2C data bit value 568 **/ 569 static int igb_get_i2c_data(void *data) 570 { 571 struct igb_adapter *adapter = (struct igb_adapter *)data; 572 struct e1000_hw *hw = &adapter->hw; 573 s32 i2cctl = rd32(E1000_I2CPARAMS); 574 575 return !!(i2cctl & E1000_I2C_DATA_IN); 576 } 577 578 /** 579 * igb_set_i2c_data - Sets the I2C data bit 580 * @data: pointer to hardware structure 581 * @state: I2C data value (0 or 1) to set 582 * 583 * Sets the I2C data bit 584 **/ 585 static void igb_set_i2c_data(void *data, int state) 586 { 587 struct igb_adapter *adapter = (struct igb_adapter *)data; 588 struct e1000_hw *hw = &adapter->hw; 589 s32 i2cctl = rd32(E1000_I2CPARAMS); 590 591 if (state) 592 i2cctl |= E1000_I2C_DATA_OUT; 593 else 594 i2cctl &= ~E1000_I2C_DATA_OUT; 595 596 i2cctl &= ~E1000_I2C_DATA_OE_N; 597 i2cctl |= E1000_I2C_CLK_OE_N; 598 wr32(E1000_I2CPARAMS, i2cctl); 599 wrfl(); 600 601 } 602 603 /** 604 * igb_set_i2c_clk - Sets the I2C SCL clock 605 * @data: pointer to hardware structure 606 * @state: state to set clock 607 * 608 * Sets the I2C clock line to state 609 **/ 610 static void igb_set_i2c_clk(void *data, int state) 611 { 612 struct igb_adapter *adapter = (struct igb_adapter *)data; 613 struct e1000_hw *hw = &adapter->hw; 614 s32 i2cctl = rd32(E1000_I2CPARAMS); 615 616 if (state) { 617 i2cctl |= E1000_I2C_CLK_OUT; 618 i2cctl &= ~E1000_I2C_CLK_OE_N; 619 } else { 620 i2cctl &= ~E1000_I2C_CLK_OUT; 621 i2cctl &= ~E1000_I2C_CLK_OE_N; 622 } 623 wr32(E1000_I2CPARAMS, i2cctl); 624 wrfl(); 625 } 626 627 /** 628 * igb_get_i2c_clk - Gets the I2C SCL clock state 629 * @data: pointer to hardware structure 630 * 631 * Gets the I2C clock state 632 **/ 633 static int igb_get_i2c_clk(void *data) 634 { 635 struct igb_adapter *adapter = (struct igb_adapter *)data; 636 struct e1000_hw *hw = &adapter->hw; 637 s32 i2cctl = rd32(E1000_I2CPARAMS); 638 639 return !!(i2cctl & E1000_I2C_CLK_IN); 640 } 641 642 static const struct i2c_algo_bit_data igb_i2c_algo = { 643 .setsda = igb_set_i2c_data, 644 .setscl = igb_set_i2c_clk, 645 .getsda = igb_get_i2c_data, 646 .getscl = igb_get_i2c_clk, 647 .udelay = 5, 648 .timeout = 20, 649 }; 650 651 /** 652 * igb_get_hw_dev - return device 653 * @hw: pointer to hardware structure 654 * 655 * used by hardware layer to print debugging information 656 **/ 657 struct net_device *igb_get_hw_dev(struct e1000_hw *hw) 658 { 659 struct igb_adapter *adapter = hw->back; 660 return adapter->netdev; 661 } 662 663 /** 664 * igb_init_module - Driver Registration Routine 665 * 666 * igb_init_module is the first routine called when the driver is 667 * loaded. All it does is register with the PCI subsystem. 668 **/ 669 static int __init igb_init_module(void) 670 { 671 int ret; 672 673 pr_info("%s - version %s\n", 674 igb_driver_string, igb_driver_version); 675 pr_info("%s\n", igb_copyright); 676 677 #ifdef CONFIG_IGB_DCA 678 dca_register_notify(&dca_notifier); 679 #endif 680 ret = pci_register_driver(&igb_driver); 681 return ret; 682 } 683 684 module_init(igb_init_module); 685 686 /** 687 * igb_exit_module - Driver Exit Cleanup Routine 688 * 689 * igb_exit_module is called just before the driver is removed 690 * from memory. 691 **/ 692 static void __exit igb_exit_module(void) 693 { 694 #ifdef CONFIG_IGB_DCA 695 dca_unregister_notify(&dca_notifier); 696 #endif 697 pci_unregister_driver(&igb_driver); 698 } 699 700 module_exit(igb_exit_module); 701 702 #define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) 703 /** 704 * igb_cache_ring_register - Descriptor ring to register mapping 705 * @adapter: board private structure to initialize 706 * 707 * Once we know the feature-set enabled for the device, we'll cache 708 * the register offset the descriptor ring is assigned to. 709 **/ 710 static void igb_cache_ring_register(struct igb_adapter *adapter) 711 { 712 int i = 0, j = 0; 713 u32 rbase_offset = adapter->vfs_allocated_count; 714 715 switch (adapter->hw.mac.type) { 716 case e1000_82576: 717 /* The queues are allocated for virtualization such that VF 0 718 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc. 719 * In order to avoid collision we start at the first free queue 720 * and continue consuming queues in the same sequence 721 */ 722 if (adapter->vfs_allocated_count) { 723 for (; i < adapter->rss_queues; i++) 724 adapter->rx_ring[i]->reg_idx = rbase_offset + 725 Q_IDX_82576(i); 726 } 727 /* Fall through */ 728 case e1000_82575: 729 case e1000_82580: 730 case e1000_i350: 731 case e1000_i354: 732 case e1000_i210: 733 case e1000_i211: 734 /* Fall through */ 735 default: 736 for (; i < adapter->num_rx_queues; i++) 737 adapter->rx_ring[i]->reg_idx = rbase_offset + i; 738 for (; j < adapter->num_tx_queues; j++) 739 adapter->tx_ring[j]->reg_idx = rbase_offset + j; 740 break; 741 } 742 } 743 744 u32 igb_rd32(struct e1000_hw *hw, u32 reg) 745 { 746 struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); 747 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); 748 u32 value = 0; 749 750 if (E1000_REMOVED(hw_addr)) 751 return ~value; 752 753 value = readl(&hw_addr[reg]); 754 755 /* reads should not return all F's */ 756 if (!(~value) && (!reg || !(~readl(hw_addr)))) { 757 struct net_device *netdev = igb->netdev; 758 hw->hw_addr = NULL; 759 netdev_err(netdev, "PCIe link lost\n"); 760 } 761 762 return value; 763 } 764 765 /** 766 * igb_write_ivar - configure ivar for given MSI-X vector 767 * @hw: pointer to the HW structure 768 * @msix_vector: vector number we are allocating to a given ring 769 * @index: row index of IVAR register to write within IVAR table 770 * @offset: column offset of in IVAR, should be multiple of 8 771 * 772 * This function is intended to handle the writing of the IVAR register 773 * for adapters 82576 and newer. The IVAR table consists of 2 columns, 774 * each containing an cause allocation for an Rx and Tx ring, and a 775 * variable number of rows depending on the number of queues supported. 776 **/ 777 static void igb_write_ivar(struct e1000_hw *hw, int msix_vector, 778 int index, int offset) 779 { 780 u32 ivar = array_rd32(E1000_IVAR0, index); 781 782 /* clear any bits that are currently set */ 783 ivar &= ~((u32)0xFF << offset); 784 785 /* write vector and valid bit */ 786 ivar |= (msix_vector | E1000_IVAR_VALID) << offset; 787 788 array_wr32(E1000_IVAR0, index, ivar); 789 } 790 791 #define IGB_N0_QUEUE -1 792 static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) 793 { 794 struct igb_adapter *adapter = q_vector->adapter; 795 struct e1000_hw *hw = &adapter->hw; 796 int rx_queue = IGB_N0_QUEUE; 797 int tx_queue = IGB_N0_QUEUE; 798 u32 msixbm = 0; 799 800 if (q_vector->rx.ring) 801 rx_queue = q_vector->rx.ring->reg_idx; 802 if (q_vector->tx.ring) 803 tx_queue = q_vector->tx.ring->reg_idx; 804 805 switch (hw->mac.type) { 806 case e1000_82575: 807 /* The 82575 assigns vectors using a bitmask, which matches the 808 * bitmask for the EICR/EIMS/EIMC registers. To assign one 809 * or more queues to a vector, we write the appropriate bits 810 * into the MSIXBM register for that vector. 811 */ 812 if (rx_queue > IGB_N0_QUEUE) 813 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; 814 if (tx_queue > IGB_N0_QUEUE) 815 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; 816 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0) 817 msixbm |= E1000_EIMS_OTHER; 818 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 819 q_vector->eims_value = msixbm; 820 break; 821 case e1000_82576: 822 /* 82576 uses a table that essentially consists of 2 columns 823 * with 8 rows. The ordering is column-major so we use the 824 * lower 3 bits as the row index, and the 4th bit as the 825 * column offset. 826 */ 827 if (rx_queue > IGB_N0_QUEUE) 828 igb_write_ivar(hw, msix_vector, 829 rx_queue & 0x7, 830 (rx_queue & 0x8) << 1); 831 if (tx_queue > IGB_N0_QUEUE) 832 igb_write_ivar(hw, msix_vector, 833 tx_queue & 0x7, 834 ((tx_queue & 0x8) << 1) + 8); 835 q_vector->eims_value = BIT(msix_vector); 836 break; 837 case e1000_82580: 838 case e1000_i350: 839 case e1000_i354: 840 case e1000_i210: 841 case e1000_i211: 842 /* On 82580 and newer adapters the scheme is similar to 82576 843 * however instead of ordering column-major we have things 844 * ordered row-major. So we traverse the table by using 845 * bit 0 as the column offset, and the remaining bits as the 846 * row index. 847 */ 848 if (rx_queue > IGB_N0_QUEUE) 849 igb_write_ivar(hw, msix_vector, 850 rx_queue >> 1, 851 (rx_queue & 0x1) << 4); 852 if (tx_queue > IGB_N0_QUEUE) 853 igb_write_ivar(hw, msix_vector, 854 tx_queue >> 1, 855 ((tx_queue & 0x1) << 4) + 8); 856 q_vector->eims_value = BIT(msix_vector); 857 break; 858 default: 859 BUG(); 860 break; 861 } 862 863 /* add q_vector eims value to global eims_enable_mask */ 864 adapter->eims_enable_mask |= q_vector->eims_value; 865 866 /* configure q_vector to set itr on first interrupt */ 867 q_vector->set_itr = 1; 868 } 869 870 /** 871 * igb_configure_msix - Configure MSI-X hardware 872 * @adapter: board private structure to initialize 873 * 874 * igb_configure_msix sets up the hardware to properly 875 * generate MSI-X interrupts. 876 **/ 877 static void igb_configure_msix(struct igb_adapter *adapter) 878 { 879 u32 tmp; 880 int i, vector = 0; 881 struct e1000_hw *hw = &adapter->hw; 882 883 adapter->eims_enable_mask = 0; 884 885 /* set vector for other causes, i.e. link changes */ 886 switch (hw->mac.type) { 887 case e1000_82575: 888 tmp = rd32(E1000_CTRL_EXT); 889 /* enable MSI-X PBA support*/ 890 tmp |= E1000_CTRL_EXT_PBA_CLR; 891 892 /* Auto-Mask interrupts upon ICR read. */ 893 tmp |= E1000_CTRL_EXT_EIAME; 894 tmp |= E1000_CTRL_EXT_IRCA; 895 896 wr32(E1000_CTRL_EXT, tmp); 897 898 /* enable msix_other interrupt */ 899 array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER); 900 adapter->eims_other = E1000_EIMS_OTHER; 901 902 break; 903 904 case e1000_82576: 905 case e1000_82580: 906 case e1000_i350: 907 case e1000_i354: 908 case e1000_i210: 909 case e1000_i211: 910 /* Turn on MSI-X capability first, or our settings 911 * won't stick. And it will take days to debug. 912 */ 913 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | 914 E1000_GPIE_PBA | E1000_GPIE_EIAME | 915 E1000_GPIE_NSICR); 916 917 /* enable msix_other interrupt */ 918 adapter->eims_other = BIT(vector); 919 tmp = (vector++ | E1000_IVAR_VALID) << 8; 920 921 wr32(E1000_IVAR_MISC, tmp); 922 break; 923 default: 924 /* do nothing, since nothing else supports MSI-X */ 925 break; 926 } /* switch (hw->mac.type) */ 927 928 adapter->eims_enable_mask |= adapter->eims_other; 929 930 for (i = 0; i < adapter->num_q_vectors; i++) 931 igb_assign_vector(adapter->q_vector[i], vector++); 932 933 wrfl(); 934 } 935 936 /** 937 * igb_request_msix - Initialize MSI-X interrupts 938 * @adapter: board private structure to initialize 939 * 940 * igb_request_msix allocates MSI-X vectors and requests interrupts from the 941 * kernel. 942 **/ 943 static int igb_request_msix(struct igb_adapter *adapter) 944 { 945 struct net_device *netdev = adapter->netdev; 946 int i, err = 0, vector = 0, free_vector = 0; 947 948 err = request_irq(adapter->msix_entries[vector].vector, 949 igb_msix_other, 0, netdev->name, adapter); 950 if (err) 951 goto err_out; 952 953 for (i = 0; i < adapter->num_q_vectors; i++) { 954 struct igb_q_vector *q_vector = adapter->q_vector[i]; 955 956 vector++; 957 958 q_vector->itr_register = adapter->io_addr + E1000_EITR(vector); 959 960 if (q_vector->rx.ring && q_vector->tx.ring) 961 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, 962 q_vector->rx.ring->queue_index); 963 else if (q_vector->tx.ring) 964 sprintf(q_vector->name, "%s-tx-%u", netdev->name, 965 q_vector->tx.ring->queue_index); 966 else if (q_vector->rx.ring) 967 sprintf(q_vector->name, "%s-rx-%u", netdev->name, 968 q_vector->rx.ring->queue_index); 969 else 970 sprintf(q_vector->name, "%s-unused", netdev->name); 971 972 err = request_irq(adapter->msix_entries[vector].vector, 973 igb_msix_ring, 0, q_vector->name, 974 q_vector); 975 if (err) 976 goto err_free; 977 } 978 979 igb_configure_msix(adapter); 980 return 0; 981 982 err_free: 983 /* free already assigned IRQs */ 984 free_irq(adapter->msix_entries[free_vector++].vector, adapter); 985 986 vector--; 987 for (i = 0; i < vector; i++) { 988 free_irq(adapter->msix_entries[free_vector++].vector, 989 adapter->q_vector[i]); 990 } 991 err_out: 992 return err; 993 } 994 995 /** 996 * igb_free_q_vector - Free memory allocated for specific interrupt vector 997 * @adapter: board private structure to initialize 998 * @v_idx: Index of vector to be freed 999 * 1000 * This function frees the memory allocated to the q_vector. 1001 **/ 1002 static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) 1003 { 1004 struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; 1005 1006 adapter->q_vector[v_idx] = NULL; 1007 1008 /* igb_get_stats64() might access the rings on this vector, 1009 * we must wait a grace period before freeing it. 1010 */ 1011 if (q_vector) 1012 kfree_rcu(q_vector, rcu); 1013 } 1014 1015 /** 1016 * igb_reset_q_vector - Reset config for interrupt vector 1017 * @adapter: board private structure to initialize 1018 * @v_idx: Index of vector to be reset 1019 * 1020 * If NAPI is enabled it will delete any references to the 1021 * NAPI struct. This is preparation for igb_free_q_vector. 1022 **/ 1023 static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) 1024 { 1025 struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; 1026 1027 /* Coming from igb_set_interrupt_capability, the vectors are not yet 1028 * allocated. So, q_vector is NULL so we should stop here. 1029 */ 1030 if (!q_vector) 1031 return; 1032 1033 if (q_vector->tx.ring) 1034 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; 1035 1036 if (q_vector->rx.ring) 1037 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; 1038 1039 netif_napi_del(&q_vector->napi); 1040 1041 } 1042 1043 static void igb_reset_interrupt_capability(struct igb_adapter *adapter) 1044 { 1045 int v_idx = adapter->num_q_vectors; 1046 1047 if (adapter->flags & IGB_FLAG_HAS_MSIX) 1048 pci_disable_msix(adapter->pdev); 1049 else if (adapter->flags & IGB_FLAG_HAS_MSI) 1050 pci_disable_msi(adapter->pdev); 1051 1052 while (v_idx--) 1053 igb_reset_q_vector(adapter, v_idx); 1054 } 1055 1056 /** 1057 * igb_free_q_vectors - Free memory allocated for interrupt vectors 1058 * @adapter: board private structure to initialize 1059 * 1060 * This function frees the memory allocated to the q_vectors. In addition if 1061 * NAPI is enabled it will delete any references to the NAPI struct prior 1062 * to freeing the q_vector. 1063 **/ 1064 static void igb_free_q_vectors(struct igb_adapter *adapter) 1065 { 1066 int v_idx = adapter->num_q_vectors; 1067 1068 adapter->num_tx_queues = 0; 1069 adapter->num_rx_queues = 0; 1070 adapter->num_q_vectors = 0; 1071 1072 while (v_idx--) { 1073 igb_reset_q_vector(adapter, v_idx); 1074 igb_free_q_vector(adapter, v_idx); 1075 } 1076 } 1077 1078 /** 1079 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts 1080 * @adapter: board private structure to initialize 1081 * 1082 * This function resets the device so that it has 0 Rx queues, Tx queues, and 1083 * MSI-X interrupts allocated. 1084 */ 1085 static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) 1086 { 1087 igb_free_q_vectors(adapter); 1088 igb_reset_interrupt_capability(adapter); 1089 } 1090 1091 /** 1092 * igb_set_interrupt_capability - set MSI or MSI-X if supported 1093 * @adapter: board private structure to initialize 1094 * @msix: boolean value of MSIX capability 1095 * 1096 * Attempt to configure interrupts using the best available 1097 * capabilities of the hardware and kernel. 1098 **/ 1099 static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) 1100 { 1101 int err; 1102 int numvecs, i; 1103 1104 if (!msix) 1105 goto msi_only; 1106 adapter->flags |= IGB_FLAG_HAS_MSIX; 1107 1108 /* Number of supported queues. */ 1109 adapter->num_rx_queues = adapter->rss_queues; 1110 if (adapter->vfs_allocated_count) 1111 adapter->num_tx_queues = 1; 1112 else 1113 adapter->num_tx_queues = adapter->rss_queues; 1114 1115 /* start with one vector for every Rx queue */ 1116 numvecs = adapter->num_rx_queues; 1117 1118 /* if Tx handler is separate add 1 for every Tx queue */ 1119 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) 1120 numvecs += adapter->num_tx_queues; 1121 1122 /* store the number of vectors reserved for queues */ 1123 adapter->num_q_vectors = numvecs; 1124 1125 /* add 1 vector for link status interrupts */ 1126 numvecs++; 1127 for (i = 0; i < numvecs; i++) 1128 adapter->msix_entries[i].entry = i; 1129 1130 err = pci_enable_msix_range(adapter->pdev, 1131 adapter->msix_entries, 1132 numvecs, 1133 numvecs); 1134 if (err > 0) 1135 return; 1136 1137 igb_reset_interrupt_capability(adapter); 1138 1139 /* If we can't do MSI-X, try MSI */ 1140 msi_only: 1141 adapter->flags &= ~IGB_FLAG_HAS_MSIX; 1142 #ifdef CONFIG_PCI_IOV 1143 /* disable SR-IOV for non MSI-X configurations */ 1144 if (adapter->vf_data) { 1145 struct e1000_hw *hw = &adapter->hw; 1146 /* disable iov and allow time for transactions to clear */ 1147 pci_disable_sriov(adapter->pdev); 1148 msleep(500); 1149 1150 kfree(adapter->vf_mac_list); 1151 adapter->vf_mac_list = NULL; 1152 kfree(adapter->vf_data); 1153 adapter->vf_data = NULL; 1154 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); 1155 wrfl(); 1156 msleep(100); 1157 dev_info(&adapter->pdev->dev, "IOV Disabled\n"); 1158 } 1159 #endif 1160 adapter->vfs_allocated_count = 0; 1161 adapter->rss_queues = 1; 1162 adapter->flags |= IGB_FLAG_QUEUE_PAIRS; 1163 adapter->num_rx_queues = 1; 1164 adapter->num_tx_queues = 1; 1165 adapter->num_q_vectors = 1; 1166 if (!pci_enable_msi(adapter->pdev)) 1167 adapter->flags |= IGB_FLAG_HAS_MSI; 1168 } 1169 1170 static void igb_add_ring(struct igb_ring *ring, 1171 struct igb_ring_container *head) 1172 { 1173 head->ring = ring; 1174 head->count++; 1175 } 1176 1177 /** 1178 * igb_alloc_q_vector - Allocate memory for a single interrupt vector 1179 * @adapter: board private structure to initialize 1180 * @v_count: q_vectors allocated on adapter, used for ring interleaving 1181 * @v_idx: index of vector in adapter struct 1182 * @txr_count: total number of Tx rings to allocate 1183 * @txr_idx: index of first Tx ring to allocate 1184 * @rxr_count: total number of Rx rings to allocate 1185 * @rxr_idx: index of first Rx ring to allocate 1186 * 1187 * We allocate one q_vector. If allocation fails we return -ENOMEM. 1188 **/ 1189 static int igb_alloc_q_vector(struct igb_adapter *adapter, 1190 int v_count, int v_idx, 1191 int txr_count, int txr_idx, 1192 int rxr_count, int rxr_idx) 1193 { 1194 struct igb_q_vector *q_vector; 1195 struct igb_ring *ring; 1196 int ring_count, size; 1197 1198 /* igb only supports 1 Tx and/or 1 Rx queue per vector */ 1199 if (txr_count > 1 || rxr_count > 1) 1200 return -ENOMEM; 1201 1202 ring_count = txr_count + rxr_count; 1203 size = sizeof(struct igb_q_vector) + 1204 (sizeof(struct igb_ring) * ring_count); 1205 1206 /* allocate q_vector and rings */ 1207 q_vector = adapter->q_vector[v_idx]; 1208 if (!q_vector) { 1209 q_vector = kzalloc(size, GFP_KERNEL); 1210 } else if (size > ksize(q_vector)) { 1211 kfree_rcu(q_vector, rcu); 1212 q_vector = kzalloc(size, GFP_KERNEL); 1213 } else { 1214 memset(q_vector, 0, size); 1215 } 1216 if (!q_vector) 1217 return -ENOMEM; 1218 1219 /* initialize NAPI */ 1220 netif_napi_add(adapter->netdev, &q_vector->napi, 1221 igb_poll, 64); 1222 1223 /* tie q_vector and adapter together */ 1224 adapter->q_vector[v_idx] = q_vector; 1225 q_vector->adapter = adapter; 1226 1227 /* initialize work limits */ 1228 q_vector->tx.work_limit = adapter->tx_work_limit; 1229 1230 /* initialize ITR configuration */ 1231 q_vector->itr_register = adapter->io_addr + E1000_EITR(0); 1232 q_vector->itr_val = IGB_START_ITR; 1233 1234 /* initialize pointer to rings */ 1235 ring = q_vector->ring; 1236 1237 /* intialize ITR */ 1238 if (rxr_count) { 1239 /* rx or rx/tx vector */ 1240 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) 1241 q_vector->itr_val = adapter->rx_itr_setting; 1242 } else { 1243 /* tx only vector */ 1244 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) 1245 q_vector->itr_val = adapter->tx_itr_setting; 1246 } 1247 1248 if (txr_count) { 1249 /* assign generic ring traits */ 1250 ring->dev = &adapter->pdev->dev; 1251 ring->netdev = adapter->netdev; 1252 1253 /* configure backlink on ring */ 1254 ring->q_vector = q_vector; 1255 1256 /* update q_vector Tx values */ 1257 igb_add_ring(ring, &q_vector->tx); 1258 1259 /* For 82575, context index must be unique per ring. */ 1260 if (adapter->hw.mac.type == e1000_82575) 1261 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags); 1262 1263 /* apply Tx specific ring traits */ 1264 ring->count = adapter->tx_ring_count; 1265 ring->queue_index = txr_idx; 1266 1267 ring->cbs_enable = false; 1268 ring->idleslope = 0; 1269 ring->sendslope = 0; 1270 ring->hicredit = 0; 1271 ring->locredit = 0; 1272 1273 u64_stats_init(&ring->tx_syncp); 1274 u64_stats_init(&ring->tx_syncp2); 1275 1276 /* assign ring to adapter */ 1277 adapter->tx_ring[txr_idx] = ring; 1278 1279 /* push pointer to next ring */ 1280 ring++; 1281 } 1282 1283 if (rxr_count) { 1284 /* assign generic ring traits */ 1285 ring->dev = &adapter->pdev->dev; 1286 ring->netdev = adapter->netdev; 1287 1288 /* configure backlink on ring */ 1289 ring->q_vector = q_vector; 1290 1291 /* update q_vector Rx values */ 1292 igb_add_ring(ring, &q_vector->rx); 1293 1294 /* set flag indicating ring supports SCTP checksum offload */ 1295 if (adapter->hw.mac.type >= e1000_82576) 1296 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); 1297 1298 /* On i350, i354, i210, and i211, loopback VLAN packets 1299 * have the tag byte-swapped. 1300 */ 1301 if (adapter->hw.mac.type >= e1000_i350) 1302 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); 1303 1304 /* apply Rx specific ring traits */ 1305 ring->count = adapter->rx_ring_count; 1306 ring->queue_index = rxr_idx; 1307 1308 u64_stats_init(&ring->rx_syncp); 1309 1310 /* assign ring to adapter */ 1311 adapter->rx_ring[rxr_idx] = ring; 1312 } 1313 1314 return 0; 1315 } 1316 1317 1318 /** 1319 * igb_alloc_q_vectors - Allocate memory for interrupt vectors 1320 * @adapter: board private structure to initialize 1321 * 1322 * We allocate one q_vector per queue interrupt. If allocation fails we 1323 * return -ENOMEM. 1324 **/ 1325 static int igb_alloc_q_vectors(struct igb_adapter *adapter) 1326 { 1327 int q_vectors = adapter->num_q_vectors; 1328 int rxr_remaining = adapter->num_rx_queues; 1329 int txr_remaining = adapter->num_tx_queues; 1330 int rxr_idx = 0, txr_idx = 0, v_idx = 0; 1331 int err; 1332 1333 if (q_vectors >= (rxr_remaining + txr_remaining)) { 1334 for (; rxr_remaining; v_idx++) { 1335 err = igb_alloc_q_vector(adapter, q_vectors, v_idx, 1336 0, 0, 1, rxr_idx); 1337 1338 if (err) 1339 goto err_out; 1340 1341 /* update counts and index */ 1342 rxr_remaining--; 1343 rxr_idx++; 1344 } 1345 } 1346 1347 for (; v_idx < q_vectors; v_idx++) { 1348 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); 1349 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); 1350 1351 err = igb_alloc_q_vector(adapter, q_vectors, v_idx, 1352 tqpv, txr_idx, rqpv, rxr_idx); 1353 1354 if (err) 1355 goto err_out; 1356 1357 /* update counts and index */ 1358 rxr_remaining -= rqpv; 1359 txr_remaining -= tqpv; 1360 rxr_idx++; 1361 txr_idx++; 1362 } 1363 1364 return 0; 1365 1366 err_out: 1367 adapter->num_tx_queues = 0; 1368 adapter->num_rx_queues = 0; 1369 adapter->num_q_vectors = 0; 1370 1371 while (v_idx--) 1372 igb_free_q_vector(adapter, v_idx); 1373 1374 return -ENOMEM; 1375 } 1376 1377 /** 1378 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors 1379 * @adapter: board private structure to initialize 1380 * @msix: boolean value of MSIX capability 1381 * 1382 * This function initializes the interrupts and allocates all of the queues. 1383 **/ 1384 static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) 1385 { 1386 struct pci_dev *pdev = adapter->pdev; 1387 int err; 1388 1389 igb_set_interrupt_capability(adapter, msix); 1390 1391 err = igb_alloc_q_vectors(adapter); 1392 if (err) { 1393 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); 1394 goto err_alloc_q_vectors; 1395 } 1396 1397 igb_cache_ring_register(adapter); 1398 1399 return 0; 1400 1401 err_alloc_q_vectors: 1402 igb_reset_interrupt_capability(adapter); 1403 return err; 1404 } 1405 1406 /** 1407 * igb_request_irq - initialize interrupts 1408 * @adapter: board private structure to initialize 1409 * 1410 * Attempts to configure interrupts using the best available 1411 * capabilities of the hardware and kernel. 1412 **/ 1413 static int igb_request_irq(struct igb_adapter *adapter) 1414 { 1415 struct net_device *netdev = adapter->netdev; 1416 struct pci_dev *pdev = adapter->pdev; 1417 int err = 0; 1418 1419 if (adapter->flags & IGB_FLAG_HAS_MSIX) { 1420 err = igb_request_msix(adapter); 1421 if (!err) 1422 goto request_done; 1423 /* fall back to MSI */ 1424 igb_free_all_tx_resources(adapter); 1425 igb_free_all_rx_resources(adapter); 1426 1427 igb_clear_interrupt_scheme(adapter); 1428 err = igb_init_interrupt_scheme(adapter, false); 1429 if (err) 1430 goto request_done; 1431 1432 igb_setup_all_tx_resources(adapter); 1433 igb_setup_all_rx_resources(adapter); 1434 igb_configure(adapter); 1435 } 1436 1437 igb_assign_vector(adapter->q_vector[0], 0); 1438 1439 if (adapter->flags & IGB_FLAG_HAS_MSI) { 1440 err = request_irq(pdev->irq, igb_intr_msi, 0, 1441 netdev->name, adapter); 1442 if (!err) 1443 goto request_done; 1444 1445 /* fall back to legacy interrupts */ 1446 igb_reset_interrupt_capability(adapter); 1447 adapter->flags &= ~IGB_FLAG_HAS_MSI; 1448 } 1449 1450 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED, 1451 netdev->name, adapter); 1452 1453 if (err) 1454 dev_err(&pdev->dev, "Error %d getting interrupt\n", 1455 err); 1456 1457 request_done: 1458 return err; 1459 } 1460 1461 static void igb_free_irq(struct igb_adapter *adapter) 1462 { 1463 if (adapter->flags & IGB_FLAG_HAS_MSIX) { 1464 int vector = 0, i; 1465 1466 free_irq(adapter->msix_entries[vector++].vector, adapter); 1467 1468 for (i = 0; i < adapter->num_q_vectors; i++) 1469 free_irq(adapter->msix_entries[vector++].vector, 1470 adapter->q_vector[i]); 1471 } else { 1472 free_irq(adapter->pdev->irq, adapter); 1473 } 1474 } 1475 1476 /** 1477 * igb_irq_disable - Mask off interrupt generation on the NIC 1478 * @adapter: board private structure 1479 **/ 1480 static void igb_irq_disable(struct igb_adapter *adapter) 1481 { 1482 struct e1000_hw *hw = &adapter->hw; 1483 1484 /* we need to be careful when disabling interrupts. The VFs are also 1485 * mapped into these registers and so clearing the bits can cause 1486 * issues on the VF drivers so we only need to clear what we set 1487 */ 1488 if (adapter->flags & IGB_FLAG_HAS_MSIX) { 1489 u32 regval = rd32(E1000_EIAM); 1490 1491 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); 1492 wr32(E1000_EIMC, adapter->eims_enable_mask); 1493 regval = rd32(E1000_EIAC); 1494 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask); 1495 } 1496 1497 wr32(E1000_IAM, 0); 1498 wr32(E1000_IMC, ~0); 1499 wrfl(); 1500 if (adapter->flags & IGB_FLAG_HAS_MSIX) { 1501 int i; 1502 1503 for (i = 0; i < adapter->num_q_vectors; i++) 1504 synchronize_irq(adapter->msix_entries[i].vector); 1505 } else { 1506 synchronize_irq(adapter->pdev->irq); 1507 } 1508 } 1509 1510 /** 1511 * igb_irq_enable - Enable default interrupt generation settings 1512 * @adapter: board private structure 1513 **/ 1514 static void igb_irq_enable(struct igb_adapter *adapter) 1515 { 1516 struct e1000_hw *hw = &adapter->hw; 1517 1518 if (adapter->flags & IGB_FLAG_HAS_MSIX) { 1519 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA; 1520 u32 regval = rd32(E1000_EIAC); 1521 1522 wr32(E1000_EIAC, regval | adapter->eims_enable_mask); 1523 regval = rd32(E1000_EIAM); 1524 wr32(E1000_EIAM, regval | adapter->eims_enable_mask); 1525 wr32(E1000_EIMS, adapter->eims_enable_mask); 1526 if (adapter->vfs_allocated_count) { 1527 wr32(E1000_MBVFIMR, 0xFF); 1528 ims |= E1000_IMS_VMMB; 1529 } 1530 wr32(E1000_IMS, ims); 1531 } else { 1532 wr32(E1000_IMS, IMS_ENABLE_MASK | 1533 E1000_IMS_DRSTA); 1534 wr32(E1000_IAM, IMS_ENABLE_MASK | 1535 E1000_IMS_DRSTA); 1536 } 1537 } 1538 1539 static void igb_update_mng_vlan(struct igb_adapter *adapter) 1540 { 1541 struct e1000_hw *hw = &adapter->hw; 1542 u16 pf_id = adapter->vfs_allocated_count; 1543 u16 vid = adapter->hw.mng_cookie.vlan_id; 1544 u16 old_vid = adapter->mng_vlan_id; 1545 1546 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { 1547 /* add VID to filter table */ 1548 igb_vfta_set(hw, vid, pf_id, true, true); 1549 adapter->mng_vlan_id = vid; 1550 } else { 1551 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; 1552 } 1553 1554 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && 1555 (vid != old_vid) && 1556 !test_bit(old_vid, adapter->active_vlans)) { 1557 /* remove VID from filter table */ 1558 igb_vfta_set(hw, vid, pf_id, false, true); 1559 } 1560 } 1561 1562 /** 1563 * igb_release_hw_control - release control of the h/w to f/w 1564 * @adapter: address of board private structure 1565 * 1566 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. 1567 * For ASF and Pass Through versions of f/w this means that the 1568 * driver is no longer loaded. 1569 **/ 1570 static void igb_release_hw_control(struct igb_adapter *adapter) 1571 { 1572 struct e1000_hw *hw = &adapter->hw; 1573 u32 ctrl_ext; 1574 1575 /* Let firmware take over control of h/w */ 1576 ctrl_ext = rd32(E1000_CTRL_EXT); 1577 wr32(E1000_CTRL_EXT, 1578 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 1579 } 1580 1581 /** 1582 * igb_get_hw_control - get control of the h/w from f/w 1583 * @adapter: address of board private structure 1584 * 1585 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. 1586 * For ASF and Pass Through versions of f/w this means that 1587 * the driver is loaded. 1588 **/ 1589 static void igb_get_hw_control(struct igb_adapter *adapter) 1590 { 1591 struct e1000_hw *hw = &adapter->hw; 1592 u32 ctrl_ext; 1593 1594 /* Let firmware know the driver has taken over */ 1595 ctrl_ext = rd32(E1000_CTRL_EXT); 1596 wr32(E1000_CTRL_EXT, 1597 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 1598 } 1599 1600 static void enable_fqtss(struct igb_adapter *adapter, bool enable) 1601 { 1602 struct net_device *netdev = adapter->netdev; 1603 struct e1000_hw *hw = &adapter->hw; 1604 1605 WARN_ON(hw->mac.type != e1000_i210); 1606 1607 if (enable) 1608 adapter->flags |= IGB_FLAG_FQTSS; 1609 else 1610 adapter->flags &= ~IGB_FLAG_FQTSS; 1611 1612 if (netif_running(netdev)) 1613 schedule_work(&adapter->reset_task); 1614 } 1615 1616 static bool is_fqtss_enabled(struct igb_adapter *adapter) 1617 { 1618 return (adapter->flags & IGB_FLAG_FQTSS) ? true : false; 1619 } 1620 1621 static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue, 1622 enum tx_queue_prio prio) 1623 { 1624 u32 val; 1625 1626 WARN_ON(hw->mac.type != e1000_i210); 1627 WARN_ON(queue < 0 || queue > 4); 1628 1629 val = rd32(E1000_I210_TXDCTL(queue)); 1630 1631 if (prio == TX_QUEUE_PRIO_HIGH) 1632 val |= E1000_TXDCTL_PRIORITY; 1633 else 1634 val &= ~E1000_TXDCTL_PRIORITY; 1635 1636 wr32(E1000_I210_TXDCTL(queue), val); 1637 } 1638 1639 static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode) 1640 { 1641 u32 val; 1642 1643 WARN_ON(hw->mac.type != e1000_i210); 1644 WARN_ON(queue < 0 || queue > 1); 1645 1646 val = rd32(E1000_I210_TQAVCC(queue)); 1647 1648 if (mode == QUEUE_MODE_STREAM_RESERVATION) 1649 val |= E1000_TQAVCC_QUEUEMODE; 1650 else 1651 val &= ~E1000_TQAVCC_QUEUEMODE; 1652 1653 wr32(E1000_I210_TQAVCC(queue), val); 1654 } 1655 1656 static bool is_any_cbs_enabled(struct igb_adapter *adapter) 1657 { 1658 int i; 1659 1660 for (i = 0; i < adapter->num_tx_queues; i++) { 1661 if (adapter->tx_ring[i]->cbs_enable) 1662 return true; 1663 } 1664 1665 return false; 1666 } 1667 1668 static bool is_any_txtime_enabled(struct igb_adapter *adapter) 1669 { 1670 int i; 1671 1672 for (i = 0; i < adapter->num_tx_queues; i++) { 1673 if (adapter->tx_ring[i]->launchtime_enable) 1674 return true; 1675 } 1676 1677 return false; 1678 } 1679 1680 /** 1681 * igb_config_tx_modes - Configure "Qav Tx mode" features on igb 1682 * @adapter: pointer to adapter struct 1683 * @queue: queue number 1684 * 1685 * Configure CBS and Launchtime for a given hardware queue. 1686 * Parameters are retrieved from the correct Tx ring, so 1687 * igb_save_cbs_params() and igb_save_txtime_params() should be used 1688 * for setting those correctly prior to this function being called. 1689 **/ 1690 static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) 1691 { 1692 struct igb_ring *ring = adapter->tx_ring[queue]; 1693 struct net_device *netdev = adapter->netdev; 1694 struct e1000_hw *hw = &adapter->hw; 1695 u32 tqavcc, tqavctrl; 1696 u16 value; 1697 1698 WARN_ON(hw->mac.type != e1000_i210); 1699 WARN_ON(queue < 0 || queue > 1); 1700 1701 /* If any of the Qav features is enabled, configure queues as SR and 1702 * with HIGH PRIO. If none is, then configure them with LOW PRIO and 1703 * as SP. 1704 */ 1705 if (ring->cbs_enable || ring->launchtime_enable) { 1706 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); 1707 set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); 1708 } else { 1709 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW); 1710 set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY); 1711 } 1712 1713 /* If CBS is enabled, set DataTranARB and config its parameters. */ 1714 if (ring->cbs_enable || queue == 0) { 1715 /* i210 does not allow the queue 0 to be in the Strict 1716 * Priority mode while the Qav mode is enabled, so, 1717 * instead of disabling strict priority mode, we give 1718 * queue 0 the maximum of credits possible. 1719 * 1720 * See section 8.12.19 of the i210 datasheet, "Note: 1721 * Queue0 QueueMode must be set to 1b when 1722 * TransmitMode is set to Qav." 1723 */ 1724 if (queue == 0 && !ring->cbs_enable) { 1725 /* max "linkspeed" idleslope in kbps */ 1726 ring->idleslope = 1000000; 1727 ring->hicredit = ETH_FRAME_LEN; 1728 } 1729 1730 /* Always set data transfer arbitration to credit-based 1731 * shaper algorithm on TQAVCTRL if CBS is enabled for any of 1732 * the queues. 1733 */ 1734 tqavctrl = rd32(E1000_I210_TQAVCTRL); 1735 tqavctrl |= E1000_TQAVCTRL_DATATRANARB; 1736 wr32(E1000_I210_TQAVCTRL, tqavctrl); 1737 1738 /* According to i210 datasheet section 7.2.7.7, we should set 1739 * the 'idleSlope' field from TQAVCC register following the 1740 * equation: 1741 * 1742 * For 100 Mbps link speed: 1743 * 1744 * value = BW * 0x7735 * 0.2 (E1) 1745 * 1746 * For 1000Mbps link speed: 1747 * 1748 * value = BW * 0x7735 * 2 (E2) 1749 * 1750 * E1 and E2 can be merged into one equation as shown below. 1751 * Note that 'link-speed' is in Mbps. 1752 * 1753 * value = BW * 0x7735 * 2 * link-speed 1754 * -------------- (E3) 1755 * 1000 1756 * 1757 * 'BW' is the percentage bandwidth out of full link speed 1758 * which can be found with the following equation. Note that 1759 * idleSlope here is the parameter from this function which 1760 * is in kbps. 1761 * 1762 * BW = idleSlope 1763 * ----------------- (E4) 1764 * link-speed * 1000 1765 * 1766 * That said, we can come up with a generic equation to 1767 * calculate the value we should set it TQAVCC register by 1768 * replacing 'BW' in E3 by E4. The resulting equation is: 1769 * 1770 * value = idleSlope * 0x7735 * 2 * link-speed 1771 * ----------------- -------------- (E5) 1772 * link-speed * 1000 1000 1773 * 1774 * 'link-speed' is present in both sides of the fraction so 1775 * it is canceled out. The final equation is the following: 1776 * 1777 * value = idleSlope * 61034 1778 * ----------------- (E6) 1779 * 1000000 1780 * 1781 * NOTE: For i210, given the above, we can see that idleslope 1782 * is represented in 16.38431 kbps units by the value at 1783 * the TQAVCC register (1Gbps / 61034), which reduces 1784 * the granularity for idleslope increments. 1785 * For instance, if you want to configure a 2576kbps 1786 * idleslope, the value to be written on the register 1787 * would have to be 157.23. If rounded down, you end 1788 * up with less bandwidth available than originally 1789 * required (~2572 kbps). If rounded up, you end up 1790 * with a higher bandwidth (~2589 kbps). Below the 1791 * approach we take is to always round up the 1792 * calculated value, so the resulting bandwidth might 1793 * be slightly higher for some configurations. 1794 */ 1795 value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000); 1796 1797 tqavcc = rd32(E1000_I210_TQAVCC(queue)); 1798 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; 1799 tqavcc |= value; 1800 wr32(E1000_I210_TQAVCC(queue), tqavcc); 1801 1802 wr32(E1000_I210_TQAVHC(queue), 1803 0x80000000 + ring->hicredit * 0x7735); 1804 } else { 1805 1806 /* Set idleSlope to zero. */ 1807 tqavcc = rd32(E1000_I210_TQAVCC(queue)); 1808 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; 1809 wr32(E1000_I210_TQAVCC(queue), tqavcc); 1810 1811 /* Set hiCredit to zero. */ 1812 wr32(E1000_I210_TQAVHC(queue), 0); 1813 1814 /* If CBS is not enabled for any queues anymore, then return to 1815 * the default state of Data Transmission Arbitration on 1816 * TQAVCTRL. 1817 */ 1818 if (!is_any_cbs_enabled(adapter)) { 1819 tqavctrl = rd32(E1000_I210_TQAVCTRL); 1820 tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB; 1821 wr32(E1000_I210_TQAVCTRL, tqavctrl); 1822 } 1823 } 1824 1825 /* If LaunchTime is enabled, set DataTranTIM. */ 1826 if (ring->launchtime_enable) { 1827 /* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled 1828 * for any of the SR queues, and configure fetchtime delta. 1829 * XXX NOTE: 1830 * - LaunchTime will be enabled for all SR queues. 1831 * - A fixed offset can be added relative to the launch 1832 * time of all packets if configured at reg LAUNCH_OS0. 1833 * We are keeping it as 0 for now (default value). 1834 */ 1835 tqavctrl = rd32(E1000_I210_TQAVCTRL); 1836 tqavctrl |= E1000_TQAVCTRL_DATATRANTIM | 1837 E1000_TQAVCTRL_FETCHTIME_DELTA; 1838 wr32(E1000_I210_TQAVCTRL, tqavctrl); 1839 } else { 1840 /* If Launchtime is not enabled for any SR queues anymore, 1841 * then clear DataTranTIM on TQAVCTRL and clear fetchtime delta, 1842 * effectively disabling Launchtime. 1843 */ 1844 if (!is_any_txtime_enabled(adapter)) { 1845 tqavctrl = rd32(E1000_I210_TQAVCTRL); 1846 tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM; 1847 tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA; 1848 wr32(E1000_I210_TQAVCTRL, tqavctrl); 1849 } 1850 } 1851 1852 /* XXX: In i210 controller the sendSlope and loCredit parameters from 1853 * CBS are not configurable by software so we don't do any 'controller 1854 * configuration' in respect to these parameters. 1855 */ 1856 1857 netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d \ 1858 idleslope %d sendslope %d hiCredit %d \ 1859 locredit %d\n", 1860 (ring->cbs_enable) ? "enabled" : "disabled", 1861 (ring->launchtime_enable) ? "enabled" : "disabled", queue, 1862 ring->idleslope, ring->sendslope, ring->hicredit, 1863 ring->locredit); 1864 } 1865 1866 static int igb_save_txtime_params(struct igb_adapter *adapter, int queue, 1867 bool enable) 1868 { 1869 struct igb_ring *ring; 1870 1871 if (queue < 0 || queue > adapter->num_tx_queues) 1872 return -EINVAL; 1873 1874 ring = adapter->tx_ring[queue]; 1875 ring->launchtime_enable = enable; 1876 1877 return 0; 1878 } 1879 1880 static int igb_save_cbs_params(struct igb_adapter *adapter, int queue, 1881 bool enable, int idleslope, int sendslope, 1882 int hicredit, int locredit) 1883 { 1884 struct igb_ring *ring; 1885 1886 if (queue < 0 || queue > adapter->num_tx_queues) 1887 return -EINVAL; 1888 1889 ring = adapter->tx_ring[queue]; 1890 1891 ring->cbs_enable = enable; 1892 ring->idleslope = idleslope; 1893 ring->sendslope = sendslope; 1894 ring->hicredit = hicredit; 1895 ring->locredit = locredit; 1896 1897 return 0; 1898 } 1899 1900 /** 1901 * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable 1902 * @adapter: pointer to adapter struct 1903 * 1904 * Configure TQAVCTRL register switching the controller's Tx mode 1905 * if FQTSS mode is enabled or disabled. Additionally, will issue 1906 * a call to igb_config_tx_modes() per queue so any previously saved 1907 * Tx parameters are applied. 1908 **/ 1909 static void igb_setup_tx_mode(struct igb_adapter *adapter) 1910 { 1911 struct net_device *netdev = adapter->netdev; 1912 struct e1000_hw *hw = &adapter->hw; 1913 u32 val; 1914 1915 /* Only i210 controller supports changing the transmission mode. */ 1916 if (hw->mac.type != e1000_i210) 1917 return; 1918 1919 if (is_fqtss_enabled(adapter)) { 1920 int i, max_queue; 1921 1922 /* Configure TQAVCTRL register: set transmit mode to 'Qav', 1923 * set data fetch arbitration to 'round robin', set SP_WAIT_SR 1924 * so SP queues wait for SR ones. 1925 */ 1926 val = rd32(E1000_I210_TQAVCTRL); 1927 val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR; 1928 val &= ~E1000_TQAVCTRL_DATAFETCHARB; 1929 wr32(E1000_I210_TQAVCTRL, val); 1930 1931 /* Configure Tx and Rx packet buffers sizes as described in 1932 * i210 datasheet section 7.2.7.7. 1933 */ 1934 val = rd32(E1000_TXPBS); 1935 val &= ~I210_TXPBSIZE_MASK; 1936 val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB | 1937 I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB; 1938 wr32(E1000_TXPBS, val); 1939 1940 val = rd32(E1000_RXPBS); 1941 val &= ~I210_RXPBSIZE_MASK; 1942 val |= I210_RXPBSIZE_PB_32KB; 1943 wr32(E1000_RXPBS, val); 1944 1945 /* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ 1946 * register should not exceed the buffer size programmed in 1947 * TXPBS. The smallest buffer size programmed in TXPBS is 4kB 1948 * so according to the datasheet we should set MAX_TPKT_SIZE to 1949 * 4kB / 64. 1950 * 1951 * However, when we do so, no frame from queue 2 and 3 are 1952 * transmitted. It seems the MAX_TPKT_SIZE should not be great 1953 * or _equal_ to the buffer size programmed in TXPBS. For this 1954 * reason, we set set MAX_ TPKT_SIZE to (4kB - 1) / 64. 1955 */ 1956 val = (4096 - 1) / 64; 1957 wr32(E1000_I210_DTXMXPKTSZ, val); 1958 1959 /* Since FQTSS mode is enabled, apply any CBS configuration 1960 * previously set. If no previous CBS configuration has been 1961 * done, then the initial configuration is applied, which means 1962 * CBS is disabled. 1963 */ 1964 max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ? 1965 adapter->num_tx_queues : I210_SR_QUEUES_NUM; 1966 1967 for (i = 0; i < max_queue; i++) { 1968 igb_config_tx_modes(adapter, i); 1969 } 1970 } else { 1971 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); 1972 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); 1973 wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT); 1974 1975 val = rd32(E1000_I210_TQAVCTRL); 1976 /* According to Section 8.12.21, the other flags we've set when 1977 * enabling FQTSS are not relevant when disabling FQTSS so we 1978 * don't set they here. 1979 */ 1980 val &= ~E1000_TQAVCTRL_XMIT_MODE; 1981 wr32(E1000_I210_TQAVCTRL, val); 1982 } 1983 1984 netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ? 1985 "enabled" : "disabled"); 1986 } 1987 1988 /** 1989 * igb_configure - configure the hardware for RX and TX 1990 * @adapter: private board structure 1991 **/ 1992 static void igb_configure(struct igb_adapter *adapter) 1993 { 1994 struct net_device *netdev = adapter->netdev; 1995 int i; 1996 1997 igb_get_hw_control(adapter); 1998 igb_set_rx_mode(netdev); 1999 igb_setup_tx_mode(adapter); 2000 2001 igb_restore_vlan(adapter); 2002 2003 igb_setup_tctl(adapter); 2004 igb_setup_mrqc(adapter); 2005 igb_setup_rctl(adapter); 2006 2007 igb_nfc_filter_restore(adapter); 2008 igb_configure_tx(adapter); 2009 igb_configure_rx(adapter); 2010 2011 igb_rx_fifo_flush_82575(&adapter->hw); 2012 2013 /* call igb_desc_unused which always leaves 2014 * at least 1 descriptor unused to make sure 2015 * next_to_use != next_to_clean 2016 */ 2017 for (i = 0; i < adapter->num_rx_queues; i++) { 2018 struct igb_ring *ring = adapter->rx_ring[i]; 2019 igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); 2020 } 2021 } 2022 2023 /** 2024 * igb_power_up_link - Power up the phy/serdes link 2025 * @adapter: address of board private structure 2026 **/ 2027 void igb_power_up_link(struct igb_adapter *adapter) 2028 { 2029 igb_reset_phy(&adapter->hw); 2030 2031 if (adapter->hw.phy.media_type == e1000_media_type_copper) 2032 igb_power_up_phy_copper(&adapter->hw); 2033 else 2034 igb_power_up_serdes_link_82575(&adapter->hw); 2035 2036 igb_setup_link(&adapter->hw); 2037 } 2038 2039 /** 2040 * igb_power_down_link - Power down the phy/serdes link 2041 * @adapter: address of board private structure 2042 */ 2043 static void igb_power_down_link(struct igb_adapter *adapter) 2044 { 2045 if (adapter->hw.phy.media_type == e1000_media_type_copper) 2046 igb_power_down_phy_copper_82575(&adapter->hw); 2047 else 2048 igb_shutdown_serdes_link_82575(&adapter->hw); 2049 } 2050 2051 /** 2052 * Detect and switch function for Media Auto Sense 2053 * @adapter: address of the board private structure 2054 **/ 2055 static void igb_check_swap_media(struct igb_adapter *adapter) 2056 { 2057 struct e1000_hw *hw = &adapter->hw; 2058 u32 ctrl_ext, connsw; 2059 bool swap_now = false; 2060 2061 ctrl_ext = rd32(E1000_CTRL_EXT); 2062 connsw = rd32(E1000_CONNSW); 2063 2064 /* need to live swap if current media is copper and we have fiber/serdes 2065 * to go to. 2066 */ 2067 2068 if ((hw->phy.media_type == e1000_media_type_copper) && 2069 (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) { 2070 swap_now = true; 2071 } else if (!(connsw & E1000_CONNSW_SERDESD)) { 2072 /* copper signal takes time to appear */ 2073 if (adapter->copper_tries < 4) { 2074 adapter->copper_tries++; 2075 connsw |= E1000_CONNSW_AUTOSENSE_CONF; 2076 wr32(E1000_CONNSW, connsw); 2077 return; 2078 } else { 2079 adapter->copper_tries = 0; 2080 if ((connsw & E1000_CONNSW_PHYSD) && 2081 (!(connsw & E1000_CONNSW_PHY_PDN))) { 2082 swap_now = true; 2083 connsw &= ~E1000_CONNSW_AUTOSENSE_CONF; 2084 wr32(E1000_CONNSW, connsw); 2085 } 2086 } 2087 } 2088 2089 if (!swap_now) 2090 return; 2091 2092 switch (hw->phy.media_type) { 2093 case e1000_media_type_copper: 2094 netdev_info(adapter->netdev, 2095 "MAS: changing media to fiber/serdes\n"); 2096 ctrl_ext |= 2097 E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; 2098 adapter->flags |= IGB_FLAG_MEDIA_RESET; 2099 adapter->copper_tries = 0; 2100 break; 2101 case e1000_media_type_internal_serdes: 2102 case e1000_media_type_fiber: 2103 netdev_info(adapter->netdev, 2104 "MAS: changing media to copper\n"); 2105 ctrl_ext &= 2106 ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; 2107 adapter->flags |= IGB_FLAG_MEDIA_RESET; 2108 break; 2109 default: 2110 /* shouldn't get here during regular operation */ 2111 netdev_err(adapter->netdev, 2112 "AMS: Invalid media type found, returning\n"); 2113 break; 2114 } 2115 wr32(E1000_CTRL_EXT, ctrl_ext); 2116 } 2117 2118 /** 2119 * igb_up - Open the interface and prepare it to handle traffic 2120 * @adapter: board private structure 2121 **/ 2122 int igb_up(struct igb_adapter *adapter) 2123 { 2124 struct e1000_hw *hw = &adapter->hw; 2125 int i; 2126 2127 /* hardware has been reset, we need to reload some things */ 2128 igb_configure(adapter); 2129 2130 clear_bit(__IGB_DOWN, &adapter->state); 2131 2132 for (i = 0; i < adapter->num_q_vectors; i++) 2133 napi_enable(&(adapter->q_vector[i]->napi)); 2134 2135 if (adapter->flags & IGB_FLAG_HAS_MSIX) 2136 igb_configure_msix(adapter); 2137 else 2138 igb_assign_vector(adapter->q_vector[0], 0); 2139 2140 /* Clear any pending interrupts. */ 2141 rd32(E1000_TSICR); 2142 rd32(E1000_ICR); 2143 igb_irq_enable(adapter); 2144 2145 /* notify VFs that reset has been completed */ 2146 if (adapter->vfs_allocated_count) { 2147 u32 reg_data = rd32(E1000_CTRL_EXT); 2148 2149 reg_data |= E1000_CTRL_EXT_PFRSTD; 2150 wr32(E1000_CTRL_EXT, reg_data); 2151 } 2152 2153 netif_tx_start_all_queues(adapter->netdev); 2154 2155 /* start the watchdog. */ 2156 hw->mac.get_link_status = 1; 2157 schedule_work(&adapter->watchdog_task); 2158 2159 if ((adapter->flags & IGB_FLAG_EEE) && 2160 (!hw->dev_spec._82575.eee_disable)) 2161 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; 2162 2163 return 0; 2164 } 2165 2166 void igb_down(struct igb_adapter *adapter) 2167 { 2168 struct net_device *netdev = adapter->netdev; 2169 struct e1000_hw *hw = &adapter->hw; 2170 u32 tctl, rctl; 2171 int i; 2172 2173 /* signal that we're down so the interrupt handler does not 2174 * reschedule our watchdog timer 2175 */ 2176 set_bit(__IGB_DOWN, &adapter->state); 2177 2178 /* disable receives in the hardware */ 2179 rctl = rd32(E1000_RCTL); 2180 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); 2181 /* flush and sleep below */ 2182 2183 igb_nfc_filter_exit(adapter); 2184 2185 netif_carrier_off(netdev); 2186 netif_tx_stop_all_queues(netdev); 2187 2188 /* disable transmits in the hardware */ 2189 tctl = rd32(E1000_TCTL); 2190 tctl &= ~E1000_TCTL_EN; 2191 wr32(E1000_TCTL, tctl); 2192 /* flush both disables and wait for them to finish */ 2193 wrfl(); 2194 usleep_range(10000, 11000); 2195 2196 igb_irq_disable(adapter); 2197 2198 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; 2199 2200 for (i = 0; i < adapter->num_q_vectors; i++) { 2201 if (adapter->q_vector[i]) { 2202 napi_synchronize(&adapter->q_vector[i]->napi); 2203 napi_disable(&adapter->q_vector[i]->napi); 2204 } 2205 } 2206 2207 del_timer_sync(&adapter->watchdog_timer); 2208 del_timer_sync(&adapter->phy_info_timer); 2209 2210 /* record the stats before reset*/ 2211 spin_lock(&adapter->stats64_lock); 2212 igb_update_stats(adapter); 2213 spin_unlock(&adapter->stats64_lock); 2214 2215 adapter->link_speed = 0; 2216 adapter->link_duplex = 0; 2217 2218 if (!pci_channel_offline(adapter->pdev)) 2219 igb_reset(adapter); 2220 2221 /* clear VLAN promisc flag so VFTA will be updated if necessary */ 2222 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; 2223 2224 igb_clean_all_tx_rings(adapter); 2225 igb_clean_all_rx_rings(adapter); 2226 #ifdef CONFIG_IGB_DCA 2227 2228 /* since we reset the hardware DCA settings were cleared */ 2229 igb_setup_dca(adapter); 2230 #endif 2231 } 2232 2233 void igb_reinit_locked(struct igb_adapter *adapter) 2234 { 2235 WARN_ON(in_interrupt()); 2236 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 2237 usleep_range(1000, 2000); 2238 igb_down(adapter); 2239 igb_up(adapter); 2240 clear_bit(__IGB_RESETTING, &adapter->state); 2241 } 2242 2243 /** igb_enable_mas - Media Autosense re-enable after swap 2244 * 2245 * @adapter: adapter struct 2246 **/ 2247 static void igb_enable_mas(struct igb_adapter *adapter) 2248 { 2249 struct e1000_hw *hw = &adapter->hw; 2250 u32 connsw = rd32(E1000_CONNSW); 2251 2252 /* configure for SerDes media detect */ 2253 if ((hw->phy.media_type == e1000_media_type_copper) && 2254 (!(connsw & E1000_CONNSW_SERDESD))) { 2255 connsw |= E1000_CONNSW_ENRGSRC; 2256 connsw |= E1000_CONNSW_AUTOSENSE_EN; 2257 wr32(E1000_CONNSW, connsw); 2258 wrfl(); 2259 } 2260 } 2261 2262 void igb_reset(struct igb_adapter *adapter) 2263 { 2264 struct pci_dev *pdev = adapter->pdev; 2265 struct e1000_hw *hw = &adapter->hw; 2266 struct e1000_mac_info *mac = &hw->mac; 2267 struct e1000_fc_info *fc = &hw->fc; 2268 u32 pba, hwm; 2269 2270 /* Repartition Pba for greater than 9k mtu 2271 * To take effect CTRL.RST is required. 2272 */ 2273 switch (mac->type) { 2274 case e1000_i350: 2275 case e1000_i354: 2276 case e1000_82580: 2277 pba = rd32(E1000_RXPBS); 2278 pba = igb_rxpbs_adjust_82580(pba); 2279 break; 2280 case e1000_82576: 2281 pba = rd32(E1000_RXPBS); 2282 pba &= E1000_RXPBS_SIZE_MASK_82576; 2283 break; 2284 case e1000_82575: 2285 case e1000_i210: 2286 case e1000_i211: 2287 default: 2288 pba = E1000_PBA_34K; 2289 break; 2290 } 2291 2292 if (mac->type == e1000_82575) { 2293 u32 min_rx_space, min_tx_space, needed_tx_space; 2294 2295 /* write Rx PBA so that hardware can report correct Tx PBA */ 2296 wr32(E1000_PBA, pba); 2297 2298 /* To maintain wire speed transmits, the Tx FIFO should be 2299 * large enough to accommodate two full transmit packets, 2300 * rounded up to the next 1KB and expressed in KB. Likewise, 2301 * the Rx FIFO should be large enough to accommodate at least 2302 * one full receive packet and is similarly rounded up and 2303 * expressed in KB. 2304 */ 2305 min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024); 2306 2307 /* The Tx FIFO also stores 16 bytes of information about the Tx 2308 * but don't include Ethernet FCS because hardware appends it. 2309 * We only need to round down to the nearest 512 byte block 2310 * count since the value we care about is 2 frames, not 1. 2311 */ 2312 min_tx_space = adapter->max_frame_size; 2313 min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN; 2314 min_tx_space = DIV_ROUND_UP(min_tx_space, 512); 2315 2316 /* upper 16 bits has Tx packet buffer allocation size in KB */ 2317 needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16); 2318 2319 /* If current Tx allocation is less than the min Tx FIFO size, 2320 * and the min Tx FIFO size is less than the current Rx FIFO 2321 * allocation, take space away from current Rx allocation. 2322 */ 2323 if (needed_tx_space < pba) { 2324 pba -= needed_tx_space; 2325 2326 /* if short on Rx space, Rx wins and must trump Tx 2327 * adjustment 2328 */ 2329 if (pba < min_rx_space) 2330 pba = min_rx_space; 2331 } 2332 2333 /* adjust PBA for jumbo frames */ 2334 wr32(E1000_PBA, pba); 2335 } 2336 2337 /* flow control settings 2338 * The high water mark must be low enough to fit one full frame 2339 * after transmitting the pause frame. As such we must have enough 2340 * space to allow for us to complete our current transmit and then 2341 * receive the frame that is in progress from the link partner. 2342 * Set it to: 2343 * - the full Rx FIFO size minus one full Tx plus one full Rx frame 2344 */ 2345 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); 2346 2347 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ 2348 fc->low_water = fc->high_water - 16; 2349 fc->pause_time = 0xFFFF; 2350 fc->send_xon = 1; 2351 fc->current_mode = fc->requested_mode; 2352 2353 /* disable receive for all VFs and wait one second */ 2354 if (adapter->vfs_allocated_count) { 2355 int i; 2356 2357 for (i = 0 ; i < adapter->vfs_allocated_count; i++) 2358 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC; 2359 2360 /* ping all the active vfs to let them know we are going down */ 2361 igb_ping_all_vfs(adapter); 2362 2363 /* disable transmits and receives */ 2364 wr32(E1000_VFRE, 0); 2365 wr32(E1000_VFTE, 0); 2366 } 2367 2368 /* Allow time for pending master requests to run */ 2369 hw->mac.ops.reset_hw(hw); 2370 wr32(E1000_WUC, 0); 2371 2372 if (adapter->flags & IGB_FLAG_MEDIA_RESET) { 2373 /* need to resetup here after media swap */ 2374 adapter->ei.get_invariants(hw); 2375 adapter->flags &= ~IGB_FLAG_MEDIA_RESET; 2376 } 2377 if ((mac->type == e1000_82575) && 2378 (adapter->flags & IGB_FLAG_MAS_ENABLE)) { 2379 igb_enable_mas(adapter); 2380 } 2381 if (hw->mac.ops.init_hw(hw)) 2382 dev_err(&pdev->dev, "Hardware Error\n"); 2383 2384 /* RAR registers were cleared during init_hw, clear mac table */ 2385 igb_flush_mac_table(adapter); 2386 __dev_uc_unsync(adapter->netdev, NULL); 2387 2388 /* Recover default RAR entry */ 2389 igb_set_default_mac_filter(adapter); 2390 2391 /* Flow control settings reset on hardware reset, so guarantee flow 2392 * control is off when forcing speed. 2393 */ 2394 if (!hw->mac.autoneg) 2395 igb_force_mac_fc(hw); 2396 2397 igb_init_dmac(adapter, pba); 2398 #ifdef CONFIG_IGB_HWMON 2399 /* Re-initialize the thermal sensor on i350 devices. */ 2400 if (!test_bit(__IGB_DOWN, &adapter->state)) { 2401 if (mac->type == e1000_i350 && hw->bus.func == 0) { 2402 /* If present, re-initialize the external thermal sensor 2403 * interface. 2404 */ 2405 if (adapter->ets) 2406 mac->ops.init_thermal_sensor_thresh(hw); 2407 } 2408 } 2409 #endif 2410 /* Re-establish EEE setting */ 2411 if (hw->phy.media_type == e1000_media_type_copper) { 2412 switch (mac->type) { 2413 case e1000_i350: 2414 case e1000_i210: 2415 case e1000_i211: 2416 igb_set_eee_i350(hw, true, true); 2417 break; 2418 case e1000_i354: 2419 igb_set_eee_i354(hw, true, true); 2420 break; 2421 default: 2422 break; 2423 } 2424 } 2425 if (!netif_running(adapter->netdev)) 2426 igb_power_down_link(adapter); 2427 2428 igb_update_mng_vlan(adapter); 2429 2430 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 2431 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); 2432 2433 /* Re-enable PTP, where applicable. */ 2434 if (adapter->ptp_flags & IGB_PTP_ENABLED) 2435 igb_ptp_reset(adapter); 2436 2437 igb_get_phy_info(hw); 2438 } 2439 2440 static netdev_features_t igb_fix_features(struct net_device *netdev, 2441 netdev_features_t features) 2442 { 2443 /* Since there is no support for separate Rx/Tx vlan accel 2444 * enable/disable make sure Tx flag is always in same state as Rx. 2445 */ 2446 if (features & NETIF_F_HW_VLAN_CTAG_RX) 2447 features |= NETIF_F_HW_VLAN_CTAG_TX; 2448 else 2449 features &= ~NETIF_F_HW_VLAN_CTAG_TX; 2450 2451 return features; 2452 } 2453 2454 static int igb_set_features(struct net_device *netdev, 2455 netdev_features_t features) 2456 { 2457 netdev_features_t changed = netdev->features ^ features; 2458 struct igb_adapter *adapter = netdev_priv(netdev); 2459 2460 if (changed & NETIF_F_HW_VLAN_CTAG_RX) 2461 igb_vlan_mode(netdev, features); 2462 2463 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) 2464 return 0; 2465 2466 if (!(features & NETIF_F_NTUPLE)) { 2467 struct hlist_node *node2; 2468 struct igb_nfc_filter *rule; 2469 2470 spin_lock(&adapter->nfc_lock); 2471 hlist_for_each_entry_safe(rule, node2, 2472 &adapter->nfc_filter_list, nfc_node) { 2473 igb_erase_filter(adapter, rule); 2474 hlist_del(&rule->nfc_node); 2475 kfree(rule); 2476 } 2477 spin_unlock(&adapter->nfc_lock); 2478 adapter->nfc_filter_count = 0; 2479 } 2480 2481 netdev->features = features; 2482 2483 if (netif_running(netdev)) 2484 igb_reinit_locked(adapter); 2485 else 2486 igb_reset(adapter); 2487 2488 return 0; 2489 } 2490 2491 static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 2492 struct net_device *dev, 2493 const unsigned char *addr, u16 vid, 2494 u16 flags) 2495 { 2496 /* guarantee we can provide a unique filter for the unicast address */ 2497 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { 2498 struct igb_adapter *adapter = netdev_priv(dev); 2499 int vfn = adapter->vfs_allocated_count; 2500 2501 if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn)) 2502 return -ENOMEM; 2503 } 2504 2505 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); 2506 } 2507 2508 #define IGB_MAX_MAC_HDR_LEN 127 2509 #define IGB_MAX_NETWORK_HDR_LEN 511 2510 2511 static netdev_features_t 2512 igb_features_check(struct sk_buff *skb, struct net_device *dev, 2513 netdev_features_t features) 2514 { 2515 unsigned int network_hdr_len, mac_hdr_len; 2516 2517 /* Make certain the headers can be described by a context descriptor */ 2518 mac_hdr_len = skb_network_header(skb) - skb->data; 2519 if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN)) 2520 return features & ~(NETIF_F_HW_CSUM | 2521 NETIF_F_SCTP_CRC | 2522 NETIF_F_HW_VLAN_CTAG_TX | 2523 NETIF_F_TSO | 2524 NETIF_F_TSO6); 2525 2526 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); 2527 if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN)) 2528 return features & ~(NETIF_F_HW_CSUM | 2529 NETIF_F_SCTP_CRC | 2530 NETIF_F_TSO | 2531 NETIF_F_TSO6); 2532 2533 /* We can only support IPV4 TSO in tunnels if we can mangle the 2534 * inner IP ID field, so strip TSO if MANGLEID is not supported. 2535 */ 2536 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) 2537 features &= ~NETIF_F_TSO; 2538 2539 return features; 2540 } 2541 2542 static void igb_offload_apply(struct igb_adapter *adapter, s32 queue) 2543 { 2544 if (!is_fqtss_enabled(adapter)) { 2545 enable_fqtss(adapter, true); 2546 return; 2547 } 2548 2549 igb_config_tx_modes(adapter, queue); 2550 2551 if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter)) 2552 enable_fqtss(adapter, false); 2553 } 2554 2555 static int igb_offload_cbs(struct igb_adapter *adapter, 2556 struct tc_cbs_qopt_offload *qopt) 2557 { 2558 struct e1000_hw *hw = &adapter->hw; 2559 int err; 2560 2561 /* CBS offloading is only supported by i210 controller. */ 2562 if (hw->mac.type != e1000_i210) 2563 return -EOPNOTSUPP; 2564 2565 /* CBS offloading is only supported by queue 0 and queue 1. */ 2566 if (qopt->queue < 0 || qopt->queue > 1) 2567 return -EINVAL; 2568 2569 err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable, 2570 qopt->idleslope, qopt->sendslope, 2571 qopt->hicredit, qopt->locredit); 2572 if (err) 2573 return err; 2574 2575 igb_offload_apply(adapter, qopt->queue); 2576 2577 return 0; 2578 } 2579 2580 #define ETHER_TYPE_FULL_MASK ((__force __be16)~0) 2581 #define VLAN_PRIO_FULL_MASK (0x07) 2582 2583 static int igb_parse_cls_flower(struct igb_adapter *adapter, 2584 struct tc_cls_flower_offload *f, 2585 int traffic_class, 2586 struct igb_nfc_filter *input) 2587 { 2588 struct netlink_ext_ack *extack = f->common.extack; 2589 2590 if (f->dissector->used_keys & 2591 ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | 2592 BIT(FLOW_DISSECTOR_KEY_CONTROL) | 2593 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 2594 BIT(FLOW_DISSECTOR_KEY_VLAN))) { 2595 NL_SET_ERR_MSG_MOD(extack, 2596 "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported"); 2597 return -EOPNOTSUPP; 2598 } 2599 2600 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 2601 struct flow_dissector_key_eth_addrs *key, *mask; 2602 2603 key = skb_flow_dissector_target(f->dissector, 2604 FLOW_DISSECTOR_KEY_ETH_ADDRS, 2605 f->key); 2606 mask = skb_flow_dissector_target(f->dissector, 2607 FLOW_DISSECTOR_KEY_ETH_ADDRS, 2608 f->mask); 2609 2610 if (!is_zero_ether_addr(mask->dst)) { 2611 if (!is_broadcast_ether_addr(mask->dst)) { 2612 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address"); 2613 return -EINVAL; 2614 } 2615 2616 input->filter.match_flags |= 2617 IGB_FILTER_FLAG_DST_MAC_ADDR; 2618 ether_addr_copy(input->filter.dst_addr, key->dst); 2619 } 2620 2621 if (!is_zero_ether_addr(mask->src)) { 2622 if (!is_broadcast_ether_addr(mask->src)) { 2623 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address"); 2624 return -EINVAL; 2625 } 2626 2627 input->filter.match_flags |= 2628 IGB_FILTER_FLAG_SRC_MAC_ADDR; 2629 ether_addr_copy(input->filter.src_addr, key->src); 2630 } 2631 } 2632 2633 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { 2634 struct flow_dissector_key_basic *key, *mask; 2635 2636 key = skb_flow_dissector_target(f->dissector, 2637 FLOW_DISSECTOR_KEY_BASIC, 2638 f->key); 2639 mask = skb_flow_dissector_target(f->dissector, 2640 FLOW_DISSECTOR_KEY_BASIC, 2641 f->mask); 2642 2643 if (mask->n_proto) { 2644 if (mask->n_proto != ETHER_TYPE_FULL_MASK) { 2645 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter"); 2646 return -EINVAL; 2647 } 2648 2649 input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE; 2650 input->filter.etype = key->n_proto; 2651 } 2652 } 2653 2654 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { 2655 struct flow_dissector_key_vlan *key, *mask; 2656 2657 key = skb_flow_dissector_target(f->dissector, 2658 FLOW_DISSECTOR_KEY_VLAN, 2659 f->key); 2660 mask = skb_flow_dissector_target(f->dissector, 2661 FLOW_DISSECTOR_KEY_VLAN, 2662 f->mask); 2663 2664 if (mask->vlan_priority) { 2665 if (mask->vlan_priority != VLAN_PRIO_FULL_MASK) { 2666 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority"); 2667 return -EINVAL; 2668 } 2669 2670 input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; 2671 input->filter.vlan_tci = key->vlan_priority; 2672 } 2673 } 2674 2675 input->action = traffic_class; 2676 input->cookie = f->cookie; 2677 2678 return 0; 2679 } 2680 2681 static int igb_configure_clsflower(struct igb_adapter *adapter, 2682 struct tc_cls_flower_offload *cls_flower) 2683 { 2684 struct netlink_ext_ack *extack = cls_flower->common.extack; 2685 struct igb_nfc_filter *filter, *f; 2686 int err, tc; 2687 2688 tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); 2689 if (tc < 0) { 2690 NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class"); 2691 return -EINVAL; 2692 } 2693 2694 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 2695 if (!filter) 2696 return -ENOMEM; 2697 2698 err = igb_parse_cls_flower(adapter, cls_flower, tc, filter); 2699 if (err < 0) 2700 goto err_parse; 2701 2702 spin_lock(&adapter->nfc_lock); 2703 2704 hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) { 2705 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { 2706 err = -EEXIST; 2707 NL_SET_ERR_MSG_MOD(extack, 2708 "This filter is already set in ethtool"); 2709 goto err_locked; 2710 } 2711 } 2712 2713 hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) { 2714 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { 2715 err = -EEXIST; 2716 NL_SET_ERR_MSG_MOD(extack, 2717 "This filter is already set in cls_flower"); 2718 goto err_locked; 2719 } 2720 } 2721 2722 err = igb_add_filter(adapter, filter); 2723 if (err < 0) { 2724 NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter"); 2725 goto err_locked; 2726 } 2727 2728 hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list); 2729 2730 spin_unlock(&adapter->nfc_lock); 2731 2732 return 0; 2733 2734 err_locked: 2735 spin_unlock(&adapter->nfc_lock); 2736 2737 err_parse: 2738 kfree(filter); 2739 2740 return err; 2741 } 2742 2743 static int igb_delete_clsflower(struct igb_adapter *adapter, 2744 struct tc_cls_flower_offload *cls_flower) 2745 { 2746 struct igb_nfc_filter *filter; 2747 int err; 2748 2749 spin_lock(&adapter->nfc_lock); 2750 2751 hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node) 2752 if (filter->cookie == cls_flower->cookie) 2753 break; 2754 2755 if (!filter) { 2756 err = -ENOENT; 2757 goto out; 2758 } 2759 2760 err = igb_erase_filter(adapter, filter); 2761 if (err < 0) 2762 goto out; 2763 2764 hlist_del(&filter->nfc_node); 2765 kfree(filter); 2766 2767 out: 2768 spin_unlock(&adapter->nfc_lock); 2769 2770 return err; 2771 } 2772 2773 static int igb_setup_tc_cls_flower(struct igb_adapter *adapter, 2774 struct tc_cls_flower_offload *cls_flower) 2775 { 2776 switch (cls_flower->command) { 2777 case TC_CLSFLOWER_REPLACE: 2778 return igb_configure_clsflower(adapter, cls_flower); 2779 case TC_CLSFLOWER_DESTROY: 2780 return igb_delete_clsflower(adapter, cls_flower); 2781 case TC_CLSFLOWER_STATS: 2782 return -EOPNOTSUPP; 2783 default: 2784 return -EOPNOTSUPP; 2785 } 2786 } 2787 2788 static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 2789 void *cb_priv) 2790 { 2791 struct igb_adapter *adapter = cb_priv; 2792 2793 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) 2794 return -EOPNOTSUPP; 2795 2796 switch (type) { 2797 case TC_SETUP_CLSFLOWER: 2798 return igb_setup_tc_cls_flower(adapter, type_data); 2799 2800 default: 2801 return -EOPNOTSUPP; 2802 } 2803 } 2804 2805 static int igb_setup_tc_block(struct igb_adapter *adapter, 2806 struct tc_block_offload *f) 2807 { 2808 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 2809 return -EOPNOTSUPP; 2810 2811 switch (f->command) { 2812 case TC_BLOCK_BIND: 2813 return tcf_block_cb_register(f->block, igb_setup_tc_block_cb, 2814 adapter, adapter, f->extack); 2815 case TC_BLOCK_UNBIND: 2816 tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb, 2817 adapter); 2818 return 0; 2819 default: 2820 return -EOPNOTSUPP; 2821 } 2822 } 2823 2824 static int igb_offload_txtime(struct igb_adapter *adapter, 2825 struct tc_etf_qopt_offload *qopt) 2826 { 2827 struct e1000_hw *hw = &adapter->hw; 2828 int err; 2829 2830 /* Launchtime offloading is only supported by i210 controller. */ 2831 if (hw->mac.type != e1000_i210) 2832 return -EOPNOTSUPP; 2833 2834 /* Launchtime offloading is only supported by queues 0 and 1. */ 2835 if (qopt->queue < 0 || qopt->queue > 1) 2836 return -EINVAL; 2837 2838 err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable); 2839 if (err) 2840 return err; 2841 2842 igb_offload_apply(adapter, qopt->queue); 2843 2844 return 0; 2845 } 2846 2847 static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, 2848 void *type_data) 2849 { 2850 struct igb_adapter *adapter = netdev_priv(dev); 2851 2852 switch (type) { 2853 case TC_SETUP_QDISC_CBS: 2854 return igb_offload_cbs(adapter, type_data); 2855 case TC_SETUP_BLOCK: 2856 return igb_setup_tc_block(adapter, type_data); 2857 case TC_SETUP_QDISC_ETF: 2858 return igb_offload_txtime(adapter, type_data); 2859 2860 default: 2861 return -EOPNOTSUPP; 2862 } 2863 } 2864 2865 static const struct net_device_ops igb_netdev_ops = { 2866 .ndo_open = igb_open, 2867 .ndo_stop = igb_close, 2868 .ndo_start_xmit = igb_xmit_frame, 2869 .ndo_get_stats64 = igb_get_stats64, 2870 .ndo_set_rx_mode = igb_set_rx_mode, 2871 .ndo_set_mac_address = igb_set_mac, 2872 .ndo_change_mtu = igb_change_mtu, 2873 .ndo_do_ioctl = igb_ioctl, 2874 .ndo_tx_timeout = igb_tx_timeout, 2875 .ndo_validate_addr = eth_validate_addr, 2876 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, 2877 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, 2878 .ndo_set_vf_mac = igb_ndo_set_vf_mac, 2879 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, 2880 .ndo_set_vf_rate = igb_ndo_set_vf_bw, 2881 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, 2882 .ndo_set_vf_trust = igb_ndo_set_vf_trust, 2883 .ndo_get_vf_config = igb_ndo_get_vf_config, 2884 #ifdef CONFIG_NET_POLL_CONTROLLER 2885 .ndo_poll_controller = igb_netpoll, 2886 #endif 2887 .ndo_fix_features = igb_fix_features, 2888 .ndo_set_features = igb_set_features, 2889 .ndo_fdb_add = igb_ndo_fdb_add, 2890 .ndo_features_check = igb_features_check, 2891 .ndo_setup_tc = igb_setup_tc, 2892 }; 2893 2894 /** 2895 * igb_set_fw_version - Configure version string for ethtool 2896 * @adapter: adapter struct 2897 **/ 2898 void igb_set_fw_version(struct igb_adapter *adapter) 2899 { 2900 struct e1000_hw *hw = &adapter->hw; 2901 struct e1000_fw_version fw; 2902 2903 igb_get_fw_version(hw, &fw); 2904 2905 switch (hw->mac.type) { 2906 case e1000_i210: 2907 case e1000_i211: 2908 if (!(igb_get_flash_presence_i210(hw))) { 2909 snprintf(adapter->fw_version, 2910 sizeof(adapter->fw_version), 2911 "%2d.%2d-%d", 2912 fw.invm_major, fw.invm_minor, 2913 fw.invm_img_type); 2914 break; 2915 } 2916 /* fall through */ 2917 default: 2918 /* if option is rom valid, display its version too */ 2919 if (fw.or_valid) { 2920 snprintf(adapter->fw_version, 2921 sizeof(adapter->fw_version), 2922 "%d.%d, 0x%08x, %d.%d.%d", 2923 fw.eep_major, fw.eep_minor, fw.etrack_id, 2924 fw.or_major, fw.or_build, fw.or_patch); 2925 /* no option rom */ 2926 } else if (fw.etrack_id != 0X0000) { 2927 snprintf(adapter->fw_version, 2928 sizeof(adapter->fw_version), 2929 "%d.%d, 0x%08x", 2930 fw.eep_major, fw.eep_minor, fw.etrack_id); 2931 } else { 2932 snprintf(adapter->fw_version, 2933 sizeof(adapter->fw_version), 2934 "%d.%d.%d", 2935 fw.eep_major, fw.eep_minor, fw.eep_build); 2936 } 2937 break; 2938 } 2939 } 2940 2941 /** 2942 * igb_init_mas - init Media Autosense feature if enabled in the NVM 2943 * 2944 * @adapter: adapter struct 2945 **/ 2946 static void igb_init_mas(struct igb_adapter *adapter) 2947 { 2948 struct e1000_hw *hw = &adapter->hw; 2949 u16 eeprom_data; 2950 2951 hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data); 2952 switch (hw->bus.func) { 2953 case E1000_FUNC_0: 2954 if (eeprom_data & IGB_MAS_ENABLE_0) { 2955 adapter->flags |= IGB_FLAG_MAS_ENABLE; 2956 netdev_info(adapter->netdev, 2957 "MAS: Enabling Media Autosense for port %d\n", 2958 hw->bus.func); 2959 } 2960 break; 2961 case E1000_FUNC_1: 2962 if (eeprom_data & IGB_MAS_ENABLE_1) { 2963 adapter->flags |= IGB_FLAG_MAS_ENABLE; 2964 netdev_info(adapter->netdev, 2965 "MAS: Enabling Media Autosense for port %d\n", 2966 hw->bus.func); 2967 } 2968 break; 2969 case E1000_FUNC_2: 2970 if (eeprom_data & IGB_MAS_ENABLE_2) { 2971 adapter->flags |= IGB_FLAG_MAS_ENABLE; 2972 netdev_info(adapter->netdev, 2973 "MAS: Enabling Media Autosense for port %d\n", 2974 hw->bus.func); 2975 } 2976 break; 2977 case E1000_FUNC_3: 2978 if (eeprom_data & IGB_MAS_ENABLE_3) { 2979 adapter->flags |= IGB_FLAG_MAS_ENABLE; 2980 netdev_info(adapter->netdev, 2981 "MAS: Enabling Media Autosense for port %d\n", 2982 hw->bus.func); 2983 } 2984 break; 2985 default: 2986 /* Shouldn't get here */ 2987 netdev_err(adapter->netdev, 2988 "MAS: Invalid port configuration, returning\n"); 2989 break; 2990 } 2991 } 2992 2993 /** 2994 * igb_init_i2c - Init I2C interface 2995 * @adapter: pointer to adapter structure 2996 **/ 2997 static s32 igb_init_i2c(struct igb_adapter *adapter) 2998 { 2999 s32 status = 0; 3000 3001 /* I2C interface supported on i350 devices */ 3002 if (adapter->hw.mac.type != e1000_i350) 3003 return 0; 3004 3005 /* Initialize the i2c bus which is controlled by the registers. 3006 * This bus will use the i2c_algo_bit structue that implements 3007 * the protocol through toggling of the 4 bits in the register. 3008 */ 3009 adapter->i2c_adap.owner = THIS_MODULE; 3010 adapter->i2c_algo = igb_i2c_algo; 3011 adapter->i2c_algo.data = adapter; 3012 adapter->i2c_adap.algo_data = &adapter->i2c_algo; 3013 adapter->i2c_adap.dev.parent = &adapter->pdev->dev; 3014 strlcpy(adapter->i2c_adap.name, "igb BB", 3015 sizeof(adapter->i2c_adap.name)); 3016 status = i2c_bit_add_bus(&adapter->i2c_adap); 3017 return status; 3018 } 3019 3020 /** 3021 * igb_probe - Device Initialization Routine 3022 * @pdev: PCI device information struct 3023 * @ent: entry in igb_pci_tbl 3024 * 3025 * Returns 0 on success, negative on failure 3026 * 3027 * igb_probe initializes an adapter identified by a pci_dev structure. 3028 * The OS initialization, configuring of the adapter private structure, 3029 * and a hardware reset occur. 3030 **/ 3031 static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3032 { 3033 struct net_device *netdev; 3034 struct igb_adapter *adapter; 3035 struct e1000_hw *hw; 3036 u16 eeprom_data = 0; 3037 s32 ret_val; 3038 static int global_quad_port_a; /* global quad port a indication */ 3039 const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; 3040 int err, pci_using_dac; 3041 u8 part_str[E1000_PBANUM_LENGTH]; 3042 3043 /* Catch broken hardware that put the wrong VF device ID in 3044 * the PCIe SR-IOV capability. 3045 */ 3046 if (pdev->is_virtfn) { 3047 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", 3048 pci_name(pdev), pdev->vendor, pdev->device); 3049 return -EINVAL; 3050 } 3051 3052 err = pci_enable_device_mem(pdev); 3053 if (err) 3054 return err; 3055 3056 pci_using_dac = 0; 3057 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3058 if (!err) { 3059 pci_using_dac = 1; 3060 } else { 3061 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3062 if (err) { 3063 dev_err(&pdev->dev, 3064 "No usable DMA configuration, aborting\n"); 3065 goto err_dma; 3066 } 3067 } 3068 3069 err = pci_request_mem_regions(pdev, igb_driver_name); 3070 if (err) 3071 goto err_pci_reg; 3072 3073 pci_enable_pcie_error_reporting(pdev); 3074 3075 pci_set_master(pdev); 3076 pci_save_state(pdev); 3077 3078 err = -ENOMEM; 3079 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), 3080 IGB_MAX_TX_QUEUES); 3081 if (!netdev) 3082 goto err_alloc_etherdev; 3083 3084 SET_NETDEV_DEV(netdev, &pdev->dev); 3085 3086 pci_set_drvdata(pdev, netdev); 3087 adapter = netdev_priv(netdev); 3088 adapter->netdev = netdev; 3089 adapter->pdev = pdev; 3090 hw = &adapter->hw; 3091 hw->back = adapter; 3092 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3093 3094 err = -EIO; 3095 adapter->io_addr = pci_iomap(pdev, 0, 0); 3096 if (!adapter->io_addr) 3097 goto err_ioremap; 3098 /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */ 3099 hw->hw_addr = adapter->io_addr; 3100 3101 netdev->netdev_ops = &igb_netdev_ops; 3102 igb_set_ethtool_ops(netdev); 3103 netdev->watchdog_timeo = 5 * HZ; 3104 3105 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 3106 3107 netdev->mem_start = pci_resource_start(pdev, 0); 3108 netdev->mem_end = pci_resource_end(pdev, 0); 3109 3110 /* PCI config space info */ 3111 hw->vendor_id = pdev->vendor; 3112 hw->device_id = pdev->device; 3113 hw->revision_id = pdev->revision; 3114 hw->subsystem_vendor_id = pdev->subsystem_vendor; 3115 hw->subsystem_device_id = pdev->subsystem_device; 3116 3117 /* Copy the default MAC, PHY and NVM function pointers */ 3118 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); 3119 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 3120 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); 3121 /* Initialize skew-specific constants */ 3122 err = ei->get_invariants(hw); 3123 if (err) 3124 goto err_sw_init; 3125 3126 /* setup the private structure */ 3127 err = igb_sw_init(adapter); 3128 if (err) 3129 goto err_sw_init; 3130 3131 igb_get_bus_info_pcie(hw); 3132 3133 hw->phy.autoneg_wait_to_complete = false; 3134 3135 /* Copper options */ 3136 if (hw->phy.media_type == e1000_media_type_copper) { 3137 hw->phy.mdix = AUTO_ALL_MODES; 3138 hw->phy.disable_polarity_correction = false; 3139 hw->phy.ms_type = e1000_ms_hw_default; 3140 } 3141 3142 if (igb_check_reset_block(hw)) 3143 dev_info(&pdev->dev, 3144 "PHY reset is blocked due to SOL/IDER session.\n"); 3145 3146 /* features is initialized to 0 in allocation, it might have bits 3147 * set by igb_sw_init so we should use an or instead of an 3148 * assignment. 3149 */ 3150 netdev->features |= NETIF_F_SG | 3151 NETIF_F_TSO | 3152 NETIF_F_TSO6 | 3153 NETIF_F_RXHASH | 3154 NETIF_F_RXCSUM | 3155 NETIF_F_HW_CSUM; 3156 3157 if (hw->mac.type >= e1000_82576) 3158 netdev->features |= NETIF_F_SCTP_CRC; 3159 3160 if (hw->mac.type >= e1000_i350) 3161 netdev->features |= NETIF_F_HW_TC; 3162 3163 #define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ 3164 NETIF_F_GSO_GRE_CSUM | \ 3165 NETIF_F_GSO_IPXIP4 | \ 3166 NETIF_F_GSO_IPXIP6 | \ 3167 NETIF_F_GSO_UDP_TUNNEL | \ 3168 NETIF_F_GSO_UDP_TUNNEL_CSUM) 3169 3170 netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES; 3171 netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES; 3172 3173 /* copy netdev features into list of user selectable features */ 3174 netdev->hw_features |= netdev->features | 3175 NETIF_F_HW_VLAN_CTAG_RX | 3176 NETIF_F_HW_VLAN_CTAG_TX | 3177 NETIF_F_RXALL; 3178 3179 if (hw->mac.type >= e1000_i350) 3180 netdev->hw_features |= NETIF_F_NTUPLE; 3181 3182 if (pci_using_dac) 3183 netdev->features |= NETIF_F_HIGHDMA; 3184 3185 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; 3186 netdev->mpls_features |= NETIF_F_HW_CSUM; 3187 netdev->hw_enc_features |= netdev->vlan_features; 3188 3189 /* set this bit last since it cannot be part of vlan_features */ 3190 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | 3191 NETIF_F_HW_VLAN_CTAG_RX | 3192 NETIF_F_HW_VLAN_CTAG_TX; 3193 3194 netdev->priv_flags |= IFF_SUPP_NOFCS; 3195 3196 netdev->priv_flags |= IFF_UNICAST_FLT; 3197 3198 /* MTU range: 68 - 9216 */ 3199 netdev->min_mtu = ETH_MIN_MTU; 3200 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; 3201 3202 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); 3203 3204 /* before reading the NVM, reset the controller to put the device in a 3205 * known good starting state 3206 */ 3207 hw->mac.ops.reset_hw(hw); 3208 3209 /* make sure the NVM is good , i211/i210 parts can have special NVM 3210 * that doesn't contain a checksum 3211 */ 3212 switch (hw->mac.type) { 3213 case e1000_i210: 3214 case e1000_i211: 3215 if (igb_get_flash_presence_i210(hw)) { 3216 if (hw->nvm.ops.validate(hw) < 0) { 3217 dev_err(&pdev->dev, 3218 "The NVM Checksum Is Not Valid\n"); 3219 err = -EIO; 3220 goto err_eeprom; 3221 } 3222 } 3223 break; 3224 default: 3225 if (hw->nvm.ops.validate(hw) < 0) { 3226 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); 3227 err = -EIO; 3228 goto err_eeprom; 3229 } 3230 break; 3231 } 3232 3233 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { 3234 /* copy the MAC address out of the NVM */ 3235 if (hw->mac.ops.read_mac_addr(hw)) 3236 dev_err(&pdev->dev, "NVM Read Error\n"); 3237 } 3238 3239 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); 3240 3241 if (!is_valid_ether_addr(netdev->dev_addr)) { 3242 dev_err(&pdev->dev, "Invalid MAC Address\n"); 3243 err = -EIO; 3244 goto err_eeprom; 3245 } 3246 3247 igb_set_default_mac_filter(adapter); 3248 3249 /* get firmware version for ethtool -i */ 3250 igb_set_fw_version(adapter); 3251 3252 /* configure RXPBSIZE and TXPBSIZE */ 3253 if (hw->mac.type == e1000_i210) { 3254 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); 3255 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); 3256 } 3257 3258 timer_setup(&adapter->watchdog_timer, igb_watchdog, 0); 3259 timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0); 3260 3261 INIT_WORK(&adapter->reset_task, igb_reset_task); 3262 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); 3263 3264 /* Initialize link properties that are user-changeable */ 3265 adapter->fc_autoneg = true; 3266 hw->mac.autoneg = true; 3267 hw->phy.autoneg_advertised = 0x2f; 3268 3269 hw->fc.requested_mode = e1000_fc_default; 3270 hw->fc.current_mode = e1000_fc_default; 3271 3272 igb_validate_mdi_setting(hw); 3273 3274 /* By default, support wake on port A */ 3275 if (hw->bus.func == 0) 3276 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; 3277 3278 /* Check the NVM for wake support on non-port A ports */ 3279 if (hw->mac.type >= e1000_82580) 3280 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + 3281 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, 3282 &eeprom_data); 3283 else if (hw->bus.func == 1) 3284 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 3285 3286 if (eeprom_data & IGB_EEPROM_APME) 3287 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; 3288 3289 /* now that we have the eeprom settings, apply the special cases where 3290 * the eeprom may be wrong or the board simply won't support wake on 3291 * lan on a particular port 3292 */ 3293 switch (pdev->device) { 3294 case E1000_DEV_ID_82575GB_QUAD_COPPER: 3295 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; 3296 break; 3297 case E1000_DEV_ID_82575EB_FIBER_SERDES: 3298 case E1000_DEV_ID_82576_FIBER: 3299 case E1000_DEV_ID_82576_SERDES: 3300 /* Wake events only supported on port A for dual fiber 3301 * regardless of eeprom setting 3302 */ 3303 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) 3304 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; 3305 break; 3306 case E1000_DEV_ID_82576_QUAD_COPPER: 3307 case E1000_DEV_ID_82576_QUAD_COPPER_ET2: 3308 /* if quad port adapter, disable WoL on all but port A */ 3309 if (global_quad_port_a != 0) 3310 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; 3311 else 3312 adapter->flags |= IGB_FLAG_QUAD_PORT_A; 3313 /* Reset for multiple quad port adapters */ 3314 if (++global_quad_port_a == 4) 3315 global_quad_port_a = 0; 3316 break; 3317 default: 3318 /* If the device can't wake, don't set software support */ 3319 if (!device_can_wakeup(&adapter->pdev->dev)) 3320 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; 3321 } 3322 3323 /* initialize the wol settings based on the eeprom settings */ 3324 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED) 3325 adapter->wol |= E1000_WUFC_MAG; 3326 3327 /* Some vendors want WoL disabled by default, but still supported */ 3328 if ((hw->mac.type == e1000_i350) && 3329 (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) { 3330 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; 3331 adapter->wol = 0; 3332 } 3333 3334 /* Some vendors want the ability to Use the EEPROM setting as 3335 * enable/disable only, and not for capability 3336 */ 3337 if (((hw->mac.type == e1000_i350) || 3338 (hw->mac.type == e1000_i354)) && 3339 (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) { 3340 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; 3341 adapter->wol = 0; 3342 } 3343 if (hw->mac.type == e1000_i350) { 3344 if (((pdev->subsystem_device == 0x5001) || 3345 (pdev->subsystem_device == 0x5002)) && 3346 (hw->bus.func == 0)) { 3347 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; 3348 adapter->wol = 0; 3349 } 3350 if (pdev->subsystem_device == 0x1F52) 3351 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; 3352 } 3353 3354 device_set_wakeup_enable(&adapter->pdev->dev, 3355 adapter->flags & IGB_FLAG_WOL_SUPPORTED); 3356 3357 /* reset the hardware with the new settings */ 3358 igb_reset(adapter); 3359 3360 /* Init the I2C interface */ 3361 err = igb_init_i2c(adapter); 3362 if (err) { 3363 dev_err(&pdev->dev, "failed to init i2c interface\n"); 3364 goto err_eeprom; 3365 } 3366 3367 /* let the f/w know that the h/w is now under the control of the 3368 * driver. 3369 */ 3370 igb_get_hw_control(adapter); 3371 3372 strcpy(netdev->name, "eth%d"); 3373 err = register_netdev(netdev); 3374 if (err) 3375 goto err_register; 3376 3377 /* carrier off reporting is important to ethtool even BEFORE open */ 3378 netif_carrier_off(netdev); 3379 3380 #ifdef CONFIG_IGB_DCA 3381 if (dca_add_requester(&pdev->dev) == 0) { 3382 adapter->flags |= IGB_FLAG_DCA_ENABLED; 3383 dev_info(&pdev->dev, "DCA enabled\n"); 3384 igb_setup_dca(adapter); 3385 } 3386 3387 #endif 3388 #ifdef CONFIG_IGB_HWMON 3389 /* Initialize the thermal sensor on i350 devices. */ 3390 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { 3391 u16 ets_word; 3392 3393 /* Read the NVM to determine if this i350 device supports an 3394 * external thermal sensor. 3395 */ 3396 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); 3397 if (ets_word != 0x0000 && ets_word != 0xFFFF) 3398 adapter->ets = true; 3399 else 3400 adapter->ets = false; 3401 if (igb_sysfs_init(adapter)) 3402 dev_err(&pdev->dev, 3403 "failed to allocate sysfs resources\n"); 3404 } else { 3405 adapter->ets = false; 3406 } 3407 #endif 3408 /* Check if Media Autosense is enabled */ 3409 adapter->ei = *ei; 3410 if (hw->dev_spec._82575.mas_capable) 3411 igb_init_mas(adapter); 3412 3413 /* do hw tstamp init after resetting */ 3414 igb_ptp_init(adapter); 3415 3416 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 3417 /* print bus type/speed/width info, not applicable to i354 */ 3418 if (hw->mac.type != e1000_i354) { 3419 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 3420 netdev->name, 3421 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : 3422 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : 3423 "unknown"), 3424 ((hw->bus.width == e1000_bus_width_pcie_x4) ? 3425 "Width x4" : 3426 (hw->bus.width == e1000_bus_width_pcie_x2) ? 3427 "Width x2" : 3428 (hw->bus.width == e1000_bus_width_pcie_x1) ? 3429 "Width x1" : "unknown"), netdev->dev_addr); 3430 } 3431 3432 if ((hw->mac.type >= e1000_i210 || 3433 igb_get_flash_presence_i210(hw))) { 3434 ret_val = igb_read_part_string(hw, part_str, 3435 E1000_PBANUM_LENGTH); 3436 } else { 3437 ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND; 3438 } 3439 3440 if (ret_val) 3441 strcpy(part_str, "Unknown"); 3442 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str); 3443 dev_info(&pdev->dev, 3444 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", 3445 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" : 3446 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", 3447 adapter->num_rx_queues, adapter->num_tx_queues); 3448 if (hw->phy.media_type == e1000_media_type_copper) { 3449 switch (hw->mac.type) { 3450 case e1000_i350: 3451 case e1000_i210: 3452 case e1000_i211: 3453 /* Enable EEE for internal copper PHY devices */ 3454 err = igb_set_eee_i350(hw, true, true); 3455 if ((!err) && 3456 (!hw->dev_spec._82575.eee_disable)) { 3457 adapter->eee_advert = 3458 MDIO_EEE_100TX | MDIO_EEE_1000T; 3459 adapter->flags |= IGB_FLAG_EEE; 3460 } 3461 break; 3462 case e1000_i354: 3463 if ((rd32(E1000_CTRL_EXT) & 3464 E1000_CTRL_EXT_LINK_MODE_SGMII)) { 3465 err = igb_set_eee_i354(hw, true, true); 3466 if ((!err) && 3467 (!hw->dev_spec._82575.eee_disable)) { 3468 adapter->eee_advert = 3469 MDIO_EEE_100TX | MDIO_EEE_1000T; 3470 adapter->flags |= IGB_FLAG_EEE; 3471 } 3472 } 3473 break; 3474 default: 3475 break; 3476 } 3477 } 3478 pm_runtime_put_noidle(&pdev->dev); 3479 return 0; 3480 3481 err_register: 3482 igb_release_hw_control(adapter); 3483 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap)); 3484 err_eeprom: 3485 if (!igb_check_reset_block(hw)) 3486 igb_reset_phy(hw); 3487 3488 if (hw->flash_address) 3489 iounmap(hw->flash_address); 3490 err_sw_init: 3491 kfree(adapter->mac_table); 3492 kfree(adapter->shadow_vfta); 3493 igb_clear_interrupt_scheme(adapter); 3494 #ifdef CONFIG_PCI_IOV 3495 igb_disable_sriov(pdev); 3496 #endif 3497 pci_iounmap(pdev, adapter->io_addr); 3498 err_ioremap: 3499 free_netdev(netdev); 3500 err_alloc_etherdev: 3501 pci_release_mem_regions(pdev); 3502 err_pci_reg: 3503 err_dma: 3504 pci_disable_device(pdev); 3505 return err; 3506 } 3507 3508 #ifdef CONFIG_PCI_IOV 3509 static int igb_disable_sriov(struct pci_dev *pdev) 3510 { 3511 struct net_device *netdev = pci_get_drvdata(pdev); 3512 struct igb_adapter *adapter = netdev_priv(netdev); 3513 struct e1000_hw *hw = &adapter->hw; 3514 3515 /* reclaim resources allocated to VFs */ 3516 if (adapter->vf_data) { 3517 /* disable iov and allow time for transactions to clear */ 3518 if (pci_vfs_assigned(pdev)) { 3519 dev_warn(&pdev->dev, 3520 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n"); 3521 return -EPERM; 3522 } else { 3523 pci_disable_sriov(pdev); 3524 msleep(500); 3525 } 3526 3527 kfree(adapter->vf_mac_list); 3528 adapter->vf_mac_list = NULL; 3529 kfree(adapter->vf_data); 3530 adapter->vf_data = NULL; 3531 adapter->vfs_allocated_count = 0; 3532 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); 3533 wrfl(); 3534 msleep(100); 3535 dev_info(&pdev->dev, "IOV Disabled\n"); 3536 3537 /* Re-enable DMA Coalescing flag since IOV is turned off */ 3538 adapter->flags |= IGB_FLAG_DMAC; 3539 } 3540 3541 return 0; 3542 } 3543 3544 static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) 3545 { 3546 struct net_device *netdev = pci_get_drvdata(pdev); 3547 struct igb_adapter *adapter = netdev_priv(netdev); 3548 int old_vfs = pci_num_vf(pdev); 3549 struct vf_mac_filter *mac_list; 3550 int err = 0; 3551 int num_vf_mac_filters, i; 3552 3553 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) { 3554 err = -EPERM; 3555 goto out; 3556 } 3557 if (!num_vfs) 3558 goto out; 3559 3560 if (old_vfs) { 3561 dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n", 3562 old_vfs, max_vfs); 3563 adapter->vfs_allocated_count = old_vfs; 3564 } else 3565 adapter->vfs_allocated_count = num_vfs; 3566 3567 adapter->vf_data = kcalloc(adapter->vfs_allocated_count, 3568 sizeof(struct vf_data_storage), GFP_KERNEL); 3569 3570 /* if allocation failed then we do not support SR-IOV */ 3571 if (!adapter->vf_data) { 3572 adapter->vfs_allocated_count = 0; 3573 err = -ENOMEM; 3574 goto out; 3575 } 3576 3577 /* Due to the limited number of RAR entries calculate potential 3578 * number of MAC filters available for the VFs. Reserve entries 3579 * for PF default MAC, PF MAC filters and at least one RAR entry 3580 * for each VF for VF MAC. 3581 */ 3582 num_vf_mac_filters = adapter->hw.mac.rar_entry_count - 3583 (1 + IGB_PF_MAC_FILTERS_RESERVED + 3584 adapter->vfs_allocated_count); 3585 3586 adapter->vf_mac_list = kcalloc(num_vf_mac_filters, 3587 sizeof(struct vf_mac_filter), 3588 GFP_KERNEL); 3589 3590 mac_list = adapter->vf_mac_list; 3591 INIT_LIST_HEAD(&adapter->vf_macs.l); 3592 3593 if (adapter->vf_mac_list) { 3594 /* Initialize list of VF MAC filters */ 3595 for (i = 0; i < num_vf_mac_filters; i++) { 3596 mac_list->vf = -1; 3597 mac_list->free = true; 3598 list_add(&mac_list->l, &adapter->vf_macs.l); 3599 mac_list++; 3600 } 3601 } else { 3602 /* If we could not allocate memory for the VF MAC filters 3603 * we can continue without this feature but warn user. 3604 */ 3605 dev_err(&pdev->dev, 3606 "Unable to allocate memory for VF MAC filter list\n"); 3607 } 3608 3609 /* only call pci_enable_sriov() if no VFs are allocated already */ 3610 if (!old_vfs) { 3611 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); 3612 if (err) 3613 goto err_out; 3614 } 3615 dev_info(&pdev->dev, "%d VFs allocated\n", 3616 adapter->vfs_allocated_count); 3617 for (i = 0; i < adapter->vfs_allocated_count; i++) 3618 igb_vf_configure(adapter, i); 3619 3620 /* DMA Coalescing is not supported in IOV mode. */ 3621 adapter->flags &= ~IGB_FLAG_DMAC; 3622 goto out; 3623 3624 err_out: 3625 kfree(adapter->vf_mac_list); 3626 adapter->vf_mac_list = NULL; 3627 kfree(adapter->vf_data); 3628 adapter->vf_data = NULL; 3629 adapter->vfs_allocated_count = 0; 3630 out: 3631 return err; 3632 } 3633 3634 #endif 3635 /** 3636 * igb_remove_i2c - Cleanup I2C interface 3637 * @adapter: pointer to adapter structure 3638 **/ 3639 static void igb_remove_i2c(struct igb_adapter *adapter) 3640 { 3641 /* free the adapter bus structure */ 3642 i2c_del_adapter(&adapter->i2c_adap); 3643 } 3644 3645 /** 3646 * igb_remove - Device Removal Routine 3647 * @pdev: PCI device information struct 3648 * 3649 * igb_remove is called by the PCI subsystem to alert the driver 3650 * that it should release a PCI device. The could be caused by a 3651 * Hot-Plug event, or because the driver is going to be removed from 3652 * memory. 3653 **/ 3654 static void igb_remove(struct pci_dev *pdev) 3655 { 3656 struct net_device *netdev = pci_get_drvdata(pdev); 3657 struct igb_adapter *adapter = netdev_priv(netdev); 3658 struct e1000_hw *hw = &adapter->hw; 3659 3660 pm_runtime_get_noresume(&pdev->dev); 3661 #ifdef CONFIG_IGB_HWMON 3662 igb_sysfs_exit(adapter); 3663 #endif 3664 igb_remove_i2c(adapter); 3665 igb_ptp_stop(adapter); 3666 /* The watchdog timer may be rescheduled, so explicitly 3667 * disable watchdog from being rescheduled. 3668 */ 3669 set_bit(__IGB_DOWN, &adapter->state); 3670 del_timer_sync(&adapter->watchdog_timer); 3671 del_timer_sync(&adapter->phy_info_timer); 3672 3673 cancel_work_sync(&adapter->reset_task); 3674 cancel_work_sync(&adapter->watchdog_task); 3675 3676 #ifdef CONFIG_IGB_DCA 3677 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { 3678 dev_info(&pdev->dev, "DCA disabled\n"); 3679 dca_remove_requester(&pdev->dev); 3680 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; 3681 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); 3682 } 3683 #endif 3684 3685 /* Release control of h/w to f/w. If f/w is AMT enabled, this 3686 * would have already happened in close and is redundant. 3687 */ 3688 igb_release_hw_control(adapter); 3689 3690 #ifdef CONFIG_PCI_IOV 3691 igb_disable_sriov(pdev); 3692 #endif 3693 3694 unregister_netdev(netdev); 3695 3696 igb_clear_interrupt_scheme(adapter); 3697 3698 pci_iounmap(pdev, adapter->io_addr); 3699 if (hw->flash_address) 3700 iounmap(hw->flash_address); 3701 pci_release_mem_regions(pdev); 3702 3703 kfree(adapter->mac_table); 3704 kfree(adapter->shadow_vfta); 3705 free_netdev(netdev); 3706 3707 pci_disable_pcie_error_reporting(pdev); 3708 3709 pci_disable_device(pdev); 3710 } 3711 3712 /** 3713 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space 3714 * @adapter: board private structure to initialize 3715 * 3716 * This function initializes the vf specific data storage and then attempts to 3717 * allocate the VFs. The reason for ordering it this way is because it is much 3718 * mor expensive time wise to disable SR-IOV than it is to allocate and free 3719 * the memory for the VFs. 3720 **/ 3721 static void igb_probe_vfs(struct igb_adapter *adapter) 3722 { 3723 #ifdef CONFIG_PCI_IOV 3724 struct pci_dev *pdev = adapter->pdev; 3725 struct e1000_hw *hw = &adapter->hw; 3726 3727 /* Virtualization features not supported on i210 family. */ 3728 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) 3729 return; 3730 3731 /* Of the below we really only want the effect of getting 3732 * IGB_FLAG_HAS_MSIX set (if available), without which 3733 * igb_enable_sriov() has no effect. 3734 */ 3735 igb_set_interrupt_capability(adapter, true); 3736 igb_reset_interrupt_capability(adapter); 3737 3738 pci_sriov_set_totalvfs(pdev, 7); 3739 igb_enable_sriov(pdev, max_vfs); 3740 3741 #endif /* CONFIG_PCI_IOV */ 3742 } 3743 3744 unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter) 3745 { 3746 struct e1000_hw *hw = &adapter->hw; 3747 unsigned int max_rss_queues; 3748 3749 /* Determine the maximum number of RSS queues supported. */ 3750 switch (hw->mac.type) { 3751 case e1000_i211: 3752 max_rss_queues = IGB_MAX_RX_QUEUES_I211; 3753 break; 3754 case e1000_82575: 3755 case e1000_i210: 3756 max_rss_queues = IGB_MAX_RX_QUEUES_82575; 3757 break; 3758 case e1000_i350: 3759 /* I350 cannot do RSS and SR-IOV at the same time */ 3760 if (!!adapter->vfs_allocated_count) { 3761 max_rss_queues = 1; 3762 break; 3763 } 3764 /* fall through */ 3765 case e1000_82576: 3766 if (!!adapter->vfs_allocated_count) { 3767 max_rss_queues = 2; 3768 break; 3769 } 3770 /* fall through */ 3771 case e1000_82580: 3772 case e1000_i354: 3773 default: 3774 max_rss_queues = IGB_MAX_RX_QUEUES; 3775 break; 3776 } 3777 3778 return max_rss_queues; 3779 } 3780 3781 static void igb_init_queue_configuration(struct igb_adapter *adapter) 3782 { 3783 u32 max_rss_queues; 3784 3785 max_rss_queues = igb_get_max_rss_queues(adapter); 3786 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); 3787 3788 igb_set_flag_queue_pairs(adapter, max_rss_queues); 3789 } 3790 3791 void igb_set_flag_queue_pairs(struct igb_adapter *adapter, 3792 const u32 max_rss_queues) 3793 { 3794 struct e1000_hw *hw = &adapter->hw; 3795 3796 /* Determine if we need to pair queues. */ 3797 switch (hw->mac.type) { 3798 case e1000_82575: 3799 case e1000_i211: 3800 /* Device supports enough interrupts without queue pairing. */ 3801 break; 3802 case e1000_82576: 3803 case e1000_82580: 3804 case e1000_i350: 3805 case e1000_i354: 3806 case e1000_i210: 3807 default: 3808 /* If rss_queues > half of max_rss_queues, pair the queues in 3809 * order to conserve interrupts due to limited supply. 3810 */ 3811 if (adapter->rss_queues > (max_rss_queues / 2)) 3812 adapter->flags |= IGB_FLAG_QUEUE_PAIRS; 3813 else 3814 adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS; 3815 break; 3816 } 3817 } 3818 3819 /** 3820 * igb_sw_init - Initialize general software structures (struct igb_adapter) 3821 * @adapter: board private structure to initialize 3822 * 3823 * igb_sw_init initializes the Adapter private data structure. 3824 * Fields are initialized based on PCI device information and 3825 * OS network device settings (MTU size). 3826 **/ 3827 static int igb_sw_init(struct igb_adapter *adapter) 3828 { 3829 struct e1000_hw *hw = &adapter->hw; 3830 struct net_device *netdev = adapter->netdev; 3831 struct pci_dev *pdev = adapter->pdev; 3832 3833 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); 3834 3835 /* set default ring sizes */ 3836 adapter->tx_ring_count = IGB_DEFAULT_TXD; 3837 adapter->rx_ring_count = IGB_DEFAULT_RXD; 3838 3839 /* set default ITR values */ 3840 adapter->rx_itr_setting = IGB_DEFAULT_ITR; 3841 adapter->tx_itr_setting = IGB_DEFAULT_ITR; 3842 3843 /* set default work limits */ 3844 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; 3845 3846 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + 3847 VLAN_HLEN; 3848 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 3849 3850 spin_lock_init(&adapter->nfc_lock); 3851 spin_lock_init(&adapter->stats64_lock); 3852 #ifdef CONFIG_PCI_IOV 3853 switch (hw->mac.type) { 3854 case e1000_82576: 3855 case e1000_i350: 3856 if (max_vfs > 7) { 3857 dev_warn(&pdev->dev, 3858 "Maximum of 7 VFs per PF, using max\n"); 3859 max_vfs = adapter->vfs_allocated_count = 7; 3860 } else 3861 adapter->vfs_allocated_count = max_vfs; 3862 if (adapter->vfs_allocated_count) 3863 dev_warn(&pdev->dev, 3864 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n"); 3865 break; 3866 default: 3867 break; 3868 } 3869 #endif /* CONFIG_PCI_IOV */ 3870 3871 /* Assume MSI-X interrupts, will be checked during IRQ allocation */ 3872 adapter->flags |= IGB_FLAG_HAS_MSIX; 3873 3874 adapter->mac_table = kcalloc(hw->mac.rar_entry_count, 3875 sizeof(struct igb_mac_addr), 3876 GFP_ATOMIC); 3877 if (!adapter->mac_table) 3878 return -ENOMEM; 3879 3880 igb_probe_vfs(adapter); 3881 3882 igb_init_queue_configuration(adapter); 3883 3884 /* Setup and initialize a copy of the hw vlan table array */ 3885 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), 3886 GFP_ATOMIC); 3887 if (!adapter->shadow_vfta) 3888 return -ENOMEM; 3889 3890 /* This call may decrease the number of queues */ 3891 if (igb_init_interrupt_scheme(adapter, true)) { 3892 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 3893 return -ENOMEM; 3894 } 3895 3896 /* Explicitly disable IRQ since the NIC can be in any state. */ 3897 igb_irq_disable(adapter); 3898 3899 if (hw->mac.type >= e1000_i350) 3900 adapter->flags &= ~IGB_FLAG_DMAC; 3901 3902 set_bit(__IGB_DOWN, &adapter->state); 3903 return 0; 3904 } 3905 3906 /** 3907 * igb_open - Called when a network interface is made active 3908 * @netdev: network interface device structure 3909 * 3910 * Returns 0 on success, negative value on failure 3911 * 3912 * The open entry point is called when a network interface is made 3913 * active by the system (IFF_UP). At this point all resources needed 3914 * for transmit and receive operations are allocated, the interrupt 3915 * handler is registered with the OS, the watchdog timer is started, 3916 * and the stack is notified that the interface is ready. 3917 **/ 3918 static int __igb_open(struct net_device *netdev, bool resuming) 3919 { 3920 struct igb_adapter *adapter = netdev_priv(netdev); 3921 struct e1000_hw *hw = &adapter->hw; 3922 struct pci_dev *pdev = adapter->pdev; 3923 int err; 3924 int i; 3925 3926 /* disallow open during test */ 3927 if (test_bit(__IGB_TESTING, &adapter->state)) { 3928 WARN_ON(resuming); 3929 return -EBUSY; 3930 } 3931 3932 if (!resuming) 3933 pm_runtime_get_sync(&pdev->dev); 3934 3935 netif_carrier_off(netdev); 3936 3937 /* allocate transmit descriptors */ 3938 err = igb_setup_all_tx_resources(adapter); 3939 if (err) 3940 goto err_setup_tx; 3941 3942 /* allocate receive descriptors */ 3943 err = igb_setup_all_rx_resources(adapter); 3944 if (err) 3945 goto err_setup_rx; 3946 3947 igb_power_up_link(adapter); 3948 3949 /* before we allocate an interrupt, we must be ready to handle it. 3950 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 3951 * as soon as we call pci_request_irq, so we have to setup our 3952 * clean_rx handler before we do so. 3953 */ 3954 igb_configure(adapter); 3955 3956 err = igb_request_irq(adapter); 3957 if (err) 3958 goto err_req_irq; 3959 3960 /* Notify the stack of the actual queue counts. */ 3961 err = netif_set_real_num_tx_queues(adapter->netdev, 3962 adapter->num_tx_queues); 3963 if (err) 3964 goto err_set_queues; 3965 3966 err = netif_set_real_num_rx_queues(adapter->netdev, 3967 adapter->num_rx_queues); 3968 if (err) 3969 goto err_set_queues; 3970 3971 /* From here on the code is the same as igb_up() */ 3972 clear_bit(__IGB_DOWN, &adapter->state); 3973 3974 for (i = 0; i < adapter->num_q_vectors; i++) 3975 napi_enable(&(adapter->q_vector[i]->napi)); 3976 3977 /* Clear any pending interrupts. */ 3978 rd32(E1000_TSICR); 3979 rd32(E1000_ICR); 3980 3981 igb_irq_enable(adapter); 3982 3983 /* notify VFs that reset has been completed */ 3984 if (adapter->vfs_allocated_count) { 3985 u32 reg_data = rd32(E1000_CTRL_EXT); 3986 3987 reg_data |= E1000_CTRL_EXT_PFRSTD; 3988 wr32(E1000_CTRL_EXT, reg_data); 3989 } 3990 3991 netif_tx_start_all_queues(netdev); 3992 3993 if (!resuming) 3994 pm_runtime_put(&pdev->dev); 3995 3996 /* start the watchdog. */ 3997 hw->mac.get_link_status = 1; 3998 schedule_work(&adapter->watchdog_task); 3999 4000 return 0; 4001 4002 err_set_queues: 4003 igb_free_irq(adapter); 4004 err_req_irq: 4005 igb_release_hw_control(adapter); 4006 igb_power_down_link(adapter); 4007 igb_free_all_rx_resources(adapter); 4008 err_setup_rx: 4009 igb_free_all_tx_resources(adapter); 4010 err_setup_tx: 4011 igb_reset(adapter); 4012 if (!resuming) 4013 pm_runtime_put(&pdev->dev); 4014 4015 return err; 4016 } 4017 4018 int igb_open(struct net_device *netdev) 4019 { 4020 return __igb_open(netdev, false); 4021 } 4022 4023 /** 4024 * igb_close - Disables a network interface 4025 * @netdev: network interface device structure 4026 * 4027 * Returns 0, this is not allowed to fail 4028 * 4029 * The close entry point is called when an interface is de-activated 4030 * by the OS. The hardware is still under the driver's control, but 4031 * needs to be disabled. A global MAC reset is issued to stop the 4032 * hardware, and all transmit and receive resources are freed. 4033 **/ 4034 static int __igb_close(struct net_device *netdev, bool suspending) 4035 { 4036 struct igb_adapter *adapter = netdev_priv(netdev); 4037 struct pci_dev *pdev = adapter->pdev; 4038 4039 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); 4040 4041 if (!suspending) 4042 pm_runtime_get_sync(&pdev->dev); 4043 4044 igb_down(adapter); 4045 igb_free_irq(adapter); 4046 4047 igb_free_all_tx_resources(adapter); 4048 igb_free_all_rx_resources(adapter); 4049 4050 if (!suspending) 4051 pm_runtime_put_sync(&pdev->dev); 4052 return 0; 4053 } 4054 4055 int igb_close(struct net_device *netdev) 4056 { 4057 if (netif_device_present(netdev) || netdev->dismantle) 4058 return __igb_close(netdev, false); 4059 return 0; 4060 } 4061 4062 /** 4063 * igb_setup_tx_resources - allocate Tx resources (Descriptors) 4064 * @tx_ring: tx descriptor ring (for a specific queue) to setup 4065 * 4066 * Return 0 on success, negative on failure 4067 **/ 4068 int igb_setup_tx_resources(struct igb_ring *tx_ring) 4069 { 4070 struct device *dev = tx_ring->dev; 4071 int size; 4072 4073 size = sizeof(struct igb_tx_buffer) * tx_ring->count; 4074 4075 tx_ring->tx_buffer_info = vmalloc(size); 4076 if (!tx_ring->tx_buffer_info) 4077 goto err; 4078 4079 /* round up to nearest 4K */ 4080 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 4081 tx_ring->size = ALIGN(tx_ring->size, 4096); 4082 4083 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 4084 &tx_ring->dma, GFP_KERNEL); 4085 if (!tx_ring->desc) 4086 goto err; 4087 4088 tx_ring->next_to_use = 0; 4089 tx_ring->next_to_clean = 0; 4090 4091 return 0; 4092 4093 err: 4094 vfree(tx_ring->tx_buffer_info); 4095 tx_ring->tx_buffer_info = NULL; 4096 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); 4097 return -ENOMEM; 4098 } 4099 4100 /** 4101 * igb_setup_all_tx_resources - wrapper to allocate Tx resources 4102 * (Descriptors) for all queues 4103 * @adapter: board private structure 4104 * 4105 * Return 0 on success, negative on failure 4106 **/ 4107 static int igb_setup_all_tx_resources(struct igb_adapter *adapter) 4108 { 4109 struct pci_dev *pdev = adapter->pdev; 4110 int i, err = 0; 4111 4112 for (i = 0; i < adapter->num_tx_queues; i++) { 4113 err = igb_setup_tx_resources(adapter->tx_ring[i]); 4114 if (err) { 4115 dev_err(&pdev->dev, 4116 "Allocation for Tx Queue %u failed\n", i); 4117 for (i--; i >= 0; i--) 4118 igb_free_tx_resources(adapter->tx_ring[i]); 4119 break; 4120 } 4121 } 4122 4123 return err; 4124 } 4125 4126 /** 4127 * igb_setup_tctl - configure the transmit control registers 4128 * @adapter: Board private structure 4129 **/ 4130 void igb_setup_tctl(struct igb_adapter *adapter) 4131 { 4132 struct e1000_hw *hw = &adapter->hw; 4133 u32 tctl; 4134 4135 /* disable queue 0 which is enabled by default on 82575 and 82576 */ 4136 wr32(E1000_TXDCTL(0), 0); 4137 4138 /* Program the Transmit Control Register */ 4139 tctl = rd32(E1000_TCTL); 4140 tctl &= ~E1000_TCTL_CT; 4141 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | 4142 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 4143 4144 igb_config_collision_dist(hw); 4145 4146 /* Enable transmits */ 4147 tctl |= E1000_TCTL_EN; 4148 4149 wr32(E1000_TCTL, tctl); 4150 } 4151 4152 /** 4153 * igb_configure_tx_ring - Configure transmit ring after Reset 4154 * @adapter: board private structure 4155 * @ring: tx ring to configure 4156 * 4157 * Configure a transmit ring after a reset. 4158 **/ 4159 void igb_configure_tx_ring(struct igb_adapter *adapter, 4160 struct igb_ring *ring) 4161 { 4162 struct e1000_hw *hw = &adapter->hw; 4163 u32 txdctl = 0; 4164 u64 tdba = ring->dma; 4165 int reg_idx = ring->reg_idx; 4166 4167 wr32(E1000_TDLEN(reg_idx), 4168 ring->count * sizeof(union e1000_adv_tx_desc)); 4169 wr32(E1000_TDBAL(reg_idx), 4170 tdba & 0x00000000ffffffffULL); 4171 wr32(E1000_TDBAH(reg_idx), tdba >> 32); 4172 4173 ring->tail = adapter->io_addr + E1000_TDT(reg_idx); 4174 wr32(E1000_TDH(reg_idx), 0); 4175 writel(0, ring->tail); 4176 4177 txdctl |= IGB_TX_PTHRESH; 4178 txdctl |= IGB_TX_HTHRESH << 8; 4179 txdctl |= IGB_TX_WTHRESH << 16; 4180 4181 /* reinitialize tx_buffer_info */ 4182 memset(ring->tx_buffer_info, 0, 4183 sizeof(struct igb_tx_buffer) * ring->count); 4184 4185 txdctl |= E1000_TXDCTL_QUEUE_ENABLE; 4186 wr32(E1000_TXDCTL(reg_idx), txdctl); 4187 } 4188 4189 /** 4190 * igb_configure_tx - Configure transmit Unit after Reset 4191 * @adapter: board private structure 4192 * 4193 * Configure the Tx unit of the MAC after a reset. 4194 **/ 4195 static void igb_configure_tx(struct igb_adapter *adapter) 4196 { 4197 struct e1000_hw *hw = &adapter->hw; 4198 int i; 4199 4200 /* disable the queues */ 4201 for (i = 0; i < adapter->num_tx_queues; i++) 4202 wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0); 4203 4204 wrfl(); 4205 usleep_range(10000, 20000); 4206 4207 for (i = 0; i < adapter->num_tx_queues; i++) 4208 igb_configure_tx_ring(adapter, adapter->tx_ring[i]); 4209 } 4210 4211 /** 4212 * igb_setup_rx_resources - allocate Rx resources (Descriptors) 4213 * @rx_ring: Rx descriptor ring (for a specific queue) to setup 4214 * 4215 * Returns 0 on success, negative on failure 4216 **/ 4217 int igb_setup_rx_resources(struct igb_ring *rx_ring) 4218 { 4219 struct device *dev = rx_ring->dev; 4220 int size; 4221 4222 size = sizeof(struct igb_rx_buffer) * rx_ring->count; 4223 4224 rx_ring->rx_buffer_info = vmalloc(size); 4225 if (!rx_ring->rx_buffer_info) 4226 goto err; 4227 4228 /* Round up to nearest 4K */ 4229 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); 4230 rx_ring->size = ALIGN(rx_ring->size, 4096); 4231 4232 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, 4233 &rx_ring->dma, GFP_KERNEL); 4234 if (!rx_ring->desc) 4235 goto err; 4236 4237 rx_ring->next_to_alloc = 0; 4238 rx_ring->next_to_clean = 0; 4239 rx_ring->next_to_use = 0; 4240 4241 return 0; 4242 4243 err: 4244 vfree(rx_ring->rx_buffer_info); 4245 rx_ring->rx_buffer_info = NULL; 4246 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); 4247 return -ENOMEM; 4248 } 4249 4250 /** 4251 * igb_setup_all_rx_resources - wrapper to allocate Rx resources 4252 * (Descriptors) for all queues 4253 * @adapter: board private structure 4254 * 4255 * Return 0 on success, negative on failure 4256 **/ 4257 static int igb_setup_all_rx_resources(struct igb_adapter *adapter) 4258 { 4259 struct pci_dev *pdev = adapter->pdev; 4260 int i, err = 0; 4261 4262 for (i = 0; i < adapter->num_rx_queues; i++) { 4263 err = igb_setup_rx_resources(adapter->rx_ring[i]); 4264 if (err) { 4265 dev_err(&pdev->dev, 4266 "Allocation for Rx Queue %u failed\n", i); 4267 for (i--; i >= 0; i--) 4268 igb_free_rx_resources(adapter->rx_ring[i]); 4269 break; 4270 } 4271 } 4272 4273 return err; 4274 } 4275 4276 /** 4277 * igb_setup_mrqc - configure the multiple receive queue control registers 4278 * @adapter: Board private structure 4279 **/ 4280 static void igb_setup_mrqc(struct igb_adapter *adapter) 4281 { 4282 struct e1000_hw *hw = &adapter->hw; 4283 u32 mrqc, rxcsum; 4284 u32 j, num_rx_queues; 4285 u32 rss_key[10]; 4286 4287 netdev_rss_key_fill(rss_key, sizeof(rss_key)); 4288 for (j = 0; j < 10; j++) 4289 wr32(E1000_RSSRK(j), rss_key[j]); 4290 4291 num_rx_queues = adapter->rss_queues; 4292 4293 switch (hw->mac.type) { 4294 case e1000_82576: 4295 /* 82576 supports 2 RSS queues for SR-IOV */ 4296 if (adapter->vfs_allocated_count) 4297 num_rx_queues = 2; 4298 break; 4299 default: 4300 break; 4301 } 4302 4303 if (adapter->rss_indir_tbl_init != num_rx_queues) { 4304 for (j = 0; j < IGB_RETA_SIZE; j++) 4305 adapter->rss_indir_tbl[j] = 4306 (j * num_rx_queues) / IGB_RETA_SIZE; 4307 adapter->rss_indir_tbl_init = num_rx_queues; 4308 } 4309 igb_write_rss_indir_tbl(adapter); 4310 4311 /* Disable raw packet checksumming so that RSS hash is placed in 4312 * descriptor on writeback. No need to enable TCP/UDP/IP checksum 4313 * offloads as they are enabled by default 4314 */ 4315 rxcsum = rd32(E1000_RXCSUM); 4316 rxcsum |= E1000_RXCSUM_PCSD; 4317 4318 if (adapter->hw.mac.type >= e1000_82576) 4319 /* Enable Receive Checksum Offload for SCTP */ 4320 rxcsum |= E1000_RXCSUM_CRCOFL; 4321 4322 /* Don't need to set TUOFL or IPOFL, they default to 1 */ 4323 wr32(E1000_RXCSUM, rxcsum); 4324 4325 /* Generate RSS hash based on packet types, TCP/UDP 4326 * port numbers and/or IPv4/v6 src and dst addresses 4327 */ 4328 mrqc = E1000_MRQC_RSS_FIELD_IPV4 | 4329 E1000_MRQC_RSS_FIELD_IPV4_TCP | 4330 E1000_MRQC_RSS_FIELD_IPV6 | 4331 E1000_MRQC_RSS_FIELD_IPV6_TCP | 4332 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; 4333 4334 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) 4335 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; 4336 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) 4337 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; 4338 4339 /* If VMDq is enabled then we set the appropriate mode for that, else 4340 * we default to RSS so that an RSS hash is calculated per packet even 4341 * if we are only using one queue 4342 */ 4343 if (adapter->vfs_allocated_count) { 4344 if (hw->mac.type > e1000_82575) { 4345 /* Set the default pool for the PF's first queue */ 4346 u32 vtctl = rd32(E1000_VT_CTL); 4347 4348 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | 4349 E1000_VT_CTL_DISABLE_DEF_POOL); 4350 vtctl |= adapter->vfs_allocated_count << 4351 E1000_VT_CTL_DEFAULT_POOL_SHIFT; 4352 wr32(E1000_VT_CTL, vtctl); 4353 } 4354 if (adapter->rss_queues > 1) 4355 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ; 4356 else 4357 mrqc |= E1000_MRQC_ENABLE_VMDQ; 4358 } else { 4359 if (hw->mac.type != e1000_i211) 4360 mrqc |= E1000_MRQC_ENABLE_RSS_MQ; 4361 } 4362 igb_vmm_control(adapter); 4363 4364 wr32(E1000_MRQC, mrqc); 4365 } 4366 4367 /** 4368 * igb_setup_rctl - configure the receive control registers 4369 * @adapter: Board private structure 4370 **/ 4371 void igb_setup_rctl(struct igb_adapter *adapter) 4372 { 4373 struct e1000_hw *hw = &adapter->hw; 4374 u32 rctl; 4375 4376 rctl = rd32(E1000_RCTL); 4377 4378 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 4379 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); 4380 4381 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | 4382 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 4383 4384 /* enable stripping of CRC. It's unlikely this will break BMC 4385 * redirection as it did with e1000. Newer features require 4386 * that the HW strips the CRC. 4387 */ 4388 rctl |= E1000_RCTL_SECRC; 4389 4390 /* disable store bad packets and clear size bits. */ 4391 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); 4392 4393 /* enable LPE to allow for reception of jumbo frames */ 4394 rctl |= E1000_RCTL_LPE; 4395 4396 /* disable queue 0 to prevent tail write w/o re-config */ 4397 wr32(E1000_RXDCTL(0), 0); 4398 4399 /* Attention!!! For SR-IOV PF driver operations you must enable 4400 * queue drop for all VF and PF queues to prevent head of line blocking 4401 * if an un-trusted VF does not provide descriptors to hardware. 4402 */ 4403 if (adapter->vfs_allocated_count) { 4404 /* set all queue drop enable bits */ 4405 wr32(E1000_QDE, ALL_QUEUES); 4406 } 4407 4408 /* This is useful for sniffing bad packets. */ 4409 if (adapter->netdev->features & NETIF_F_RXALL) { 4410 /* UPE and MPE will be handled by normal PROMISC logic 4411 * in e1000e_set_rx_mode 4412 */ 4413 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ 4414 E1000_RCTL_BAM | /* RX All Bcast Pkts */ 4415 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ 4416 4417 rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */ 4418 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ 4419 /* Do not mess with E1000_CTRL_VME, it affects transmit as well, 4420 * and that breaks VLANs. 4421 */ 4422 } 4423 4424 wr32(E1000_RCTL, rctl); 4425 } 4426 4427 static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, 4428 int vfn) 4429 { 4430 struct e1000_hw *hw = &adapter->hw; 4431 u32 vmolr; 4432 4433 if (size > MAX_JUMBO_FRAME_SIZE) 4434 size = MAX_JUMBO_FRAME_SIZE; 4435 4436 vmolr = rd32(E1000_VMOLR(vfn)); 4437 vmolr &= ~E1000_VMOLR_RLPML_MASK; 4438 vmolr |= size | E1000_VMOLR_LPE; 4439 wr32(E1000_VMOLR(vfn), vmolr); 4440 4441 return 0; 4442 } 4443 4444 static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter, 4445 int vfn, bool enable) 4446 { 4447 struct e1000_hw *hw = &adapter->hw; 4448 u32 val, reg; 4449 4450 if (hw->mac.type < e1000_82576) 4451 return; 4452 4453 if (hw->mac.type == e1000_i350) 4454 reg = E1000_DVMOLR(vfn); 4455 else 4456 reg = E1000_VMOLR(vfn); 4457 4458 val = rd32(reg); 4459 if (enable) 4460 val |= E1000_VMOLR_STRVLAN; 4461 else 4462 val &= ~(E1000_VMOLR_STRVLAN); 4463 wr32(reg, val); 4464 } 4465 4466 static inline void igb_set_vmolr(struct igb_adapter *adapter, 4467 int vfn, bool aupe) 4468 { 4469 struct e1000_hw *hw = &adapter->hw; 4470 u32 vmolr; 4471 4472 /* This register exists only on 82576 and newer so if we are older then 4473 * we should exit and do nothing 4474 */ 4475 if (hw->mac.type < e1000_82576) 4476 return; 4477 4478 vmolr = rd32(E1000_VMOLR(vfn)); 4479 if (aupe) 4480 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ 4481 else 4482 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ 4483 4484 /* clear all bits that might not be set */ 4485 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); 4486 4487 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) 4488 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ 4489 /* for VMDq only allow the VFs and pool 0 to accept broadcast and 4490 * multicast packets 4491 */ 4492 if (vfn <= adapter->vfs_allocated_count) 4493 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ 4494 4495 wr32(E1000_VMOLR(vfn), vmolr); 4496 } 4497 4498 /** 4499 * igb_configure_rx_ring - Configure a receive ring after Reset 4500 * @adapter: board private structure 4501 * @ring: receive ring to be configured 4502 * 4503 * Configure the Rx unit of the MAC after a reset. 4504 **/ 4505 void igb_configure_rx_ring(struct igb_adapter *adapter, 4506 struct igb_ring *ring) 4507 { 4508 struct e1000_hw *hw = &adapter->hw; 4509 union e1000_adv_rx_desc *rx_desc; 4510 u64 rdba = ring->dma; 4511 int reg_idx = ring->reg_idx; 4512 u32 srrctl = 0, rxdctl = 0; 4513 4514 /* disable the queue */ 4515 wr32(E1000_RXDCTL(reg_idx), 0); 4516 4517 /* Set DMA base address registers */ 4518 wr32(E1000_RDBAL(reg_idx), 4519 rdba & 0x00000000ffffffffULL); 4520 wr32(E1000_RDBAH(reg_idx), rdba >> 32); 4521 wr32(E1000_RDLEN(reg_idx), 4522 ring->count * sizeof(union e1000_adv_rx_desc)); 4523 4524 /* initialize head and tail */ 4525 ring->tail = adapter->io_addr + E1000_RDT(reg_idx); 4526 wr32(E1000_RDH(reg_idx), 0); 4527 writel(0, ring->tail); 4528 4529 /* set descriptor configuration */ 4530 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; 4531 if (ring_uses_large_buffer(ring)) 4532 srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 4533 else 4534 srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 4535 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; 4536 if (hw->mac.type >= e1000_82580) 4537 srrctl |= E1000_SRRCTL_TIMESTAMP; 4538 /* Only set Drop Enable if we are supporting multiple queues */ 4539 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) 4540 srrctl |= E1000_SRRCTL_DROP_EN; 4541 4542 wr32(E1000_SRRCTL(reg_idx), srrctl); 4543 4544 /* set filtering for VMDQ pools */ 4545 igb_set_vmolr(adapter, reg_idx & 0x7, true); 4546 4547 rxdctl |= IGB_RX_PTHRESH; 4548 rxdctl |= IGB_RX_HTHRESH << 8; 4549 rxdctl |= IGB_RX_WTHRESH << 16; 4550 4551 /* initialize rx_buffer_info */ 4552 memset(ring->rx_buffer_info, 0, 4553 sizeof(struct igb_rx_buffer) * ring->count); 4554 4555 /* initialize Rx descriptor 0 */ 4556 rx_desc = IGB_RX_DESC(ring, 0); 4557 rx_desc->wb.upper.length = 0; 4558 4559 /* enable receive descriptor fetching */ 4560 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; 4561 wr32(E1000_RXDCTL(reg_idx), rxdctl); 4562 } 4563 4564 static void igb_set_rx_buffer_len(struct igb_adapter *adapter, 4565 struct igb_ring *rx_ring) 4566 { 4567 /* set build_skb and buffer size flags */ 4568 clear_ring_build_skb_enabled(rx_ring); 4569 clear_ring_uses_large_buffer(rx_ring); 4570 4571 if (adapter->flags & IGB_FLAG_RX_LEGACY) 4572 return; 4573 4574 set_ring_build_skb_enabled(rx_ring); 4575 4576 #if (PAGE_SIZE < 8192) 4577 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) 4578 return; 4579 4580 set_ring_uses_large_buffer(rx_ring); 4581 #endif 4582 } 4583 4584 /** 4585 * igb_configure_rx - Configure receive Unit after Reset 4586 * @adapter: board private structure 4587 * 4588 * Configure the Rx unit of the MAC after a reset. 4589 **/ 4590 static void igb_configure_rx(struct igb_adapter *adapter) 4591 { 4592 int i; 4593 4594 /* set the correct pool for the PF default MAC address in entry 0 */ 4595 igb_set_default_mac_filter(adapter); 4596 4597 /* Setup the HW Rx Head and Tail Descriptor Pointers and 4598 * the Base and Length of the Rx Descriptor Ring 4599 */ 4600 for (i = 0; i < adapter->num_rx_queues; i++) { 4601 struct igb_ring *rx_ring = adapter->rx_ring[i]; 4602 4603 igb_set_rx_buffer_len(adapter, rx_ring); 4604 igb_configure_rx_ring(adapter, rx_ring); 4605 } 4606 } 4607 4608 /** 4609 * igb_free_tx_resources - Free Tx Resources per Queue 4610 * @tx_ring: Tx descriptor ring for a specific queue 4611 * 4612 * Free all transmit software resources 4613 **/ 4614 void igb_free_tx_resources(struct igb_ring *tx_ring) 4615 { 4616 igb_clean_tx_ring(tx_ring); 4617 4618 vfree(tx_ring->tx_buffer_info); 4619 tx_ring->tx_buffer_info = NULL; 4620 4621 /* if not set, then don't free */ 4622 if (!tx_ring->desc) 4623 return; 4624 4625 dma_free_coherent(tx_ring->dev, tx_ring->size, 4626 tx_ring->desc, tx_ring->dma); 4627 4628 tx_ring->desc = NULL; 4629 } 4630 4631 /** 4632 * igb_free_all_tx_resources - Free Tx Resources for All Queues 4633 * @adapter: board private structure 4634 * 4635 * Free all transmit software resources 4636 **/ 4637 static void igb_free_all_tx_resources(struct igb_adapter *adapter) 4638 { 4639 int i; 4640 4641 for (i = 0; i < adapter->num_tx_queues; i++) 4642 if (adapter->tx_ring[i]) 4643 igb_free_tx_resources(adapter->tx_ring[i]); 4644 } 4645 4646 /** 4647 * igb_clean_tx_ring - Free Tx Buffers 4648 * @tx_ring: ring to be cleaned 4649 **/ 4650 static void igb_clean_tx_ring(struct igb_ring *tx_ring) 4651 { 4652 u16 i = tx_ring->next_to_clean; 4653 struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; 4654 4655 while (i != tx_ring->next_to_use) { 4656 union e1000_adv_tx_desc *eop_desc, *tx_desc; 4657 4658 /* Free all the Tx ring sk_buffs */ 4659 dev_kfree_skb_any(tx_buffer->skb); 4660 4661 /* unmap skb header data */ 4662 dma_unmap_single(tx_ring->dev, 4663 dma_unmap_addr(tx_buffer, dma), 4664 dma_unmap_len(tx_buffer, len), 4665 DMA_TO_DEVICE); 4666 4667 /* check for eop_desc to determine the end of the packet */ 4668 eop_desc = tx_buffer->next_to_watch; 4669 tx_desc = IGB_TX_DESC(tx_ring, i); 4670 4671 /* unmap remaining buffers */ 4672 while (tx_desc != eop_desc) { 4673 tx_buffer++; 4674 tx_desc++; 4675 i++; 4676 if (unlikely(i == tx_ring->count)) { 4677 i = 0; 4678 tx_buffer = tx_ring->tx_buffer_info; 4679 tx_desc = IGB_TX_DESC(tx_ring, 0); 4680 } 4681 4682 /* unmap any remaining paged data */ 4683 if (dma_unmap_len(tx_buffer, len)) 4684 dma_unmap_page(tx_ring->dev, 4685 dma_unmap_addr(tx_buffer, dma), 4686 dma_unmap_len(tx_buffer, len), 4687 DMA_TO_DEVICE); 4688 } 4689 4690 /* move us one more past the eop_desc for start of next pkt */ 4691 tx_buffer++; 4692 i++; 4693 if (unlikely(i == tx_ring->count)) { 4694 i = 0; 4695 tx_buffer = tx_ring->tx_buffer_info; 4696 } 4697 } 4698 4699 /* reset BQL for queue */ 4700 netdev_tx_reset_queue(txring_txq(tx_ring)); 4701 4702 /* reset next_to_use and next_to_clean */ 4703 tx_ring->next_to_use = 0; 4704 tx_ring->next_to_clean = 0; 4705 } 4706 4707 /** 4708 * igb_clean_all_tx_rings - Free Tx Buffers for all queues 4709 * @adapter: board private structure 4710 **/ 4711 static void igb_clean_all_tx_rings(struct igb_adapter *adapter) 4712 { 4713 int i; 4714 4715 for (i = 0; i < adapter->num_tx_queues; i++) 4716 if (adapter->tx_ring[i]) 4717 igb_clean_tx_ring(adapter->tx_ring[i]); 4718 } 4719 4720 /** 4721 * igb_free_rx_resources - Free Rx Resources 4722 * @rx_ring: ring to clean the resources from 4723 * 4724 * Free all receive software resources 4725 **/ 4726 void igb_free_rx_resources(struct igb_ring *rx_ring) 4727 { 4728 igb_clean_rx_ring(rx_ring); 4729 4730 vfree(rx_ring->rx_buffer_info); 4731 rx_ring->rx_buffer_info = NULL; 4732 4733 /* if not set, then don't free */ 4734 if (!rx_ring->desc) 4735 return; 4736 4737 dma_free_coherent(rx_ring->dev, rx_ring->size, 4738 rx_ring->desc, rx_ring->dma); 4739 4740 rx_ring->desc = NULL; 4741 } 4742 4743 /** 4744 * igb_free_all_rx_resources - Free Rx Resources for All Queues 4745 * @adapter: board private structure 4746 * 4747 * Free all receive software resources 4748 **/ 4749 static void igb_free_all_rx_resources(struct igb_adapter *adapter) 4750 { 4751 int i; 4752 4753 for (i = 0; i < adapter->num_rx_queues; i++) 4754 if (adapter->rx_ring[i]) 4755 igb_free_rx_resources(adapter->rx_ring[i]); 4756 } 4757 4758 /** 4759 * igb_clean_rx_ring - Free Rx Buffers per Queue 4760 * @rx_ring: ring to free buffers from 4761 **/ 4762 static void igb_clean_rx_ring(struct igb_ring *rx_ring) 4763 { 4764 u16 i = rx_ring->next_to_clean; 4765 4766 if (rx_ring->skb) 4767 dev_kfree_skb(rx_ring->skb); 4768 rx_ring->skb = NULL; 4769 4770 /* Free all the Rx ring sk_buffs */ 4771 while (i != rx_ring->next_to_alloc) { 4772 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; 4773 4774 /* Invalidate cache lines that may have been written to by 4775 * device so that we avoid corrupting memory. 4776 */ 4777 dma_sync_single_range_for_cpu(rx_ring->dev, 4778 buffer_info->dma, 4779 buffer_info->page_offset, 4780 igb_rx_bufsz(rx_ring), 4781 DMA_FROM_DEVICE); 4782 4783 /* free resources associated with mapping */ 4784 dma_unmap_page_attrs(rx_ring->dev, 4785 buffer_info->dma, 4786 igb_rx_pg_size(rx_ring), 4787 DMA_FROM_DEVICE, 4788 IGB_RX_DMA_ATTR); 4789 __page_frag_cache_drain(buffer_info->page, 4790 buffer_info->pagecnt_bias); 4791 4792 i++; 4793 if (i == rx_ring->count) 4794 i = 0; 4795 } 4796 4797 rx_ring->next_to_alloc = 0; 4798 rx_ring->next_to_clean = 0; 4799 rx_ring->next_to_use = 0; 4800 } 4801 4802 /** 4803 * igb_clean_all_rx_rings - Free Rx Buffers for all queues 4804 * @adapter: board private structure 4805 **/ 4806 static void igb_clean_all_rx_rings(struct igb_adapter *adapter) 4807 { 4808 int i; 4809 4810 for (i = 0; i < adapter->num_rx_queues; i++) 4811 if (adapter->rx_ring[i]) 4812 igb_clean_rx_ring(adapter->rx_ring[i]); 4813 } 4814 4815 /** 4816 * igb_set_mac - Change the Ethernet Address of the NIC 4817 * @netdev: network interface device structure 4818 * @p: pointer to an address structure 4819 * 4820 * Returns 0 on success, negative on failure 4821 **/ 4822 static int igb_set_mac(struct net_device *netdev, void *p) 4823 { 4824 struct igb_adapter *adapter = netdev_priv(netdev); 4825 struct e1000_hw *hw = &adapter->hw; 4826 struct sockaddr *addr = p; 4827 4828 if (!is_valid_ether_addr(addr->sa_data)) 4829 return -EADDRNOTAVAIL; 4830 4831 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 4832 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 4833 4834 /* set the correct pool for the new PF MAC address in entry 0 */ 4835 igb_set_default_mac_filter(adapter); 4836 4837 return 0; 4838 } 4839 4840 /** 4841 * igb_write_mc_addr_list - write multicast addresses to MTA 4842 * @netdev: network interface device structure 4843 * 4844 * Writes multicast address list to the MTA hash table. 4845 * Returns: -ENOMEM on failure 4846 * 0 on no addresses written 4847 * X on writing X addresses to MTA 4848 **/ 4849 static int igb_write_mc_addr_list(struct net_device *netdev) 4850 { 4851 struct igb_adapter *adapter = netdev_priv(netdev); 4852 struct e1000_hw *hw = &adapter->hw; 4853 struct netdev_hw_addr *ha; 4854 u8 *mta_list; 4855 int i; 4856 4857 if (netdev_mc_empty(netdev)) { 4858 /* nothing to program, so clear mc list */ 4859 igb_update_mc_addr_list(hw, NULL, 0); 4860 igb_restore_vf_multicasts(adapter); 4861 return 0; 4862 } 4863 4864 mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC); 4865 if (!mta_list) 4866 return -ENOMEM; 4867 4868 /* The shared function expects a packed array of only addresses. */ 4869 i = 0; 4870 netdev_for_each_mc_addr(ha, netdev) 4871 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); 4872 4873 igb_update_mc_addr_list(hw, mta_list, i); 4874 kfree(mta_list); 4875 4876 return netdev_mc_count(netdev); 4877 } 4878 4879 static int igb_vlan_promisc_enable(struct igb_adapter *adapter) 4880 { 4881 struct e1000_hw *hw = &adapter->hw; 4882 u32 i, pf_id; 4883 4884 switch (hw->mac.type) { 4885 case e1000_i210: 4886 case e1000_i211: 4887 case e1000_i350: 4888 /* VLAN filtering needed for VLAN prio filter */ 4889 if (adapter->netdev->features & NETIF_F_NTUPLE) 4890 break; 4891 /* fall through */ 4892 case e1000_82576: 4893 case e1000_82580: 4894 case e1000_i354: 4895 /* VLAN filtering needed for pool filtering */ 4896 if (adapter->vfs_allocated_count) 4897 break; 4898 /* fall through */ 4899 default: 4900 return 1; 4901 } 4902 4903 /* We are already in VLAN promisc, nothing to do */ 4904 if (adapter->flags & IGB_FLAG_VLAN_PROMISC) 4905 return 0; 4906 4907 if (!adapter->vfs_allocated_count) 4908 goto set_vfta; 4909 4910 /* Add PF to all active pools */ 4911 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; 4912 4913 for (i = E1000_VLVF_ARRAY_SIZE; --i;) { 4914 u32 vlvf = rd32(E1000_VLVF(i)); 4915 4916 vlvf |= BIT(pf_id); 4917 wr32(E1000_VLVF(i), vlvf); 4918 } 4919 4920 set_vfta: 4921 /* Set all bits in the VLAN filter table array */ 4922 for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;) 4923 hw->mac.ops.write_vfta(hw, i, ~0U); 4924 4925 /* Set flag so we don't redo unnecessary work */ 4926 adapter->flags |= IGB_FLAG_VLAN_PROMISC; 4927 4928 return 0; 4929 } 4930 4931 #define VFTA_BLOCK_SIZE 8 4932 static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset) 4933 { 4934 struct e1000_hw *hw = &adapter->hw; 4935 u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; 4936 u32 vid_start = vfta_offset * 32; 4937 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32); 4938 u32 i, vid, word, bits, pf_id; 4939 4940 /* guarantee that we don't scrub out management VLAN */ 4941 vid = adapter->mng_vlan_id; 4942 if (vid >= vid_start && vid < vid_end) 4943 vfta[(vid - vid_start) / 32] |= BIT(vid % 32); 4944 4945 if (!adapter->vfs_allocated_count) 4946 goto set_vfta; 4947 4948 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; 4949 4950 for (i = E1000_VLVF_ARRAY_SIZE; --i;) { 4951 u32 vlvf = rd32(E1000_VLVF(i)); 4952 4953 /* pull VLAN ID from VLVF */ 4954 vid = vlvf & VLAN_VID_MASK; 4955 4956 /* only concern ourselves with a certain range */ 4957 if (vid < vid_start || vid >= vid_end) 4958 continue; 4959 4960 if (vlvf & E1000_VLVF_VLANID_ENABLE) { 4961 /* record VLAN ID in VFTA */ 4962 vfta[(vid - vid_start) / 32] |= BIT(vid % 32); 4963 4964 /* if PF is part of this then continue */ 4965 if (test_bit(vid, adapter->active_vlans)) 4966 continue; 4967 } 4968 4969 /* remove PF from the pool */ 4970 bits = ~BIT(pf_id); 4971 bits &= rd32(E1000_VLVF(i)); 4972 wr32(E1000_VLVF(i), bits); 4973 } 4974 4975 set_vfta: 4976 /* extract values from active_vlans and write back to VFTA */ 4977 for (i = VFTA_BLOCK_SIZE; i--;) { 4978 vid = (vfta_offset + i) * 32; 4979 word = vid / BITS_PER_LONG; 4980 bits = vid % BITS_PER_LONG; 4981 4982 vfta[i] |= adapter->active_vlans[word] >> bits; 4983 4984 hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]); 4985 } 4986 } 4987 4988 static void igb_vlan_promisc_disable(struct igb_adapter *adapter) 4989 { 4990 u32 i; 4991 4992 /* We are not in VLAN promisc, nothing to do */ 4993 if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC)) 4994 return; 4995 4996 /* Set flag so we don't redo unnecessary work */ 4997 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; 4998 4999 for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE) 5000 igb_scrub_vfta(adapter, i); 5001 } 5002 5003 /** 5004 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 5005 * @netdev: network interface device structure 5006 * 5007 * The set_rx_mode entry point is called whenever the unicast or multicast 5008 * address lists or the network interface flags are updated. This routine is 5009 * responsible for configuring the hardware for proper unicast, multicast, 5010 * promiscuous mode, and all-multi behavior. 5011 **/ 5012 static void igb_set_rx_mode(struct net_device *netdev) 5013 { 5014 struct igb_adapter *adapter = netdev_priv(netdev); 5015 struct e1000_hw *hw = &adapter->hw; 5016 unsigned int vfn = adapter->vfs_allocated_count; 5017 u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE; 5018 int count; 5019 5020 /* Check for Promiscuous and All Multicast modes */ 5021 if (netdev->flags & IFF_PROMISC) { 5022 rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE; 5023 vmolr |= E1000_VMOLR_MPME; 5024 5025 /* enable use of UTA filter to force packets to default pool */ 5026 if (hw->mac.type == e1000_82576) 5027 vmolr |= E1000_VMOLR_ROPE; 5028 } else { 5029 if (netdev->flags & IFF_ALLMULTI) { 5030 rctl |= E1000_RCTL_MPE; 5031 vmolr |= E1000_VMOLR_MPME; 5032 } else { 5033 /* Write addresses to the MTA, if the attempt fails 5034 * then we should just turn on promiscuous mode so 5035 * that we can at least receive multicast traffic 5036 */ 5037 count = igb_write_mc_addr_list(netdev); 5038 if (count < 0) { 5039 rctl |= E1000_RCTL_MPE; 5040 vmolr |= E1000_VMOLR_MPME; 5041 } else if (count) { 5042 vmolr |= E1000_VMOLR_ROMPE; 5043 } 5044 } 5045 } 5046 5047 /* Write addresses to available RAR registers, if there is not 5048 * sufficient space to store all the addresses then enable 5049 * unicast promiscuous mode 5050 */ 5051 if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) { 5052 rctl |= E1000_RCTL_UPE; 5053 vmolr |= E1000_VMOLR_ROPE; 5054 } 5055 5056 /* enable VLAN filtering by default */ 5057 rctl |= E1000_RCTL_VFE; 5058 5059 /* disable VLAN filtering for modes that require it */ 5060 if ((netdev->flags & IFF_PROMISC) || 5061 (netdev->features & NETIF_F_RXALL)) { 5062 /* if we fail to set all rules then just clear VFE */ 5063 if (igb_vlan_promisc_enable(adapter)) 5064 rctl &= ~E1000_RCTL_VFE; 5065 } else { 5066 igb_vlan_promisc_disable(adapter); 5067 } 5068 5069 /* update state of unicast, multicast, and VLAN filtering modes */ 5070 rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE | 5071 E1000_RCTL_VFE); 5072 wr32(E1000_RCTL, rctl); 5073 5074 #if (PAGE_SIZE < 8192) 5075 if (!adapter->vfs_allocated_count) { 5076 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) 5077 rlpml = IGB_MAX_FRAME_BUILD_SKB; 5078 } 5079 #endif 5080 wr32(E1000_RLPML, rlpml); 5081 5082 /* In order to support SR-IOV and eventually VMDq it is necessary to set 5083 * the VMOLR to enable the appropriate modes. Without this workaround 5084 * we will have issues with VLAN tag stripping not being done for frames 5085 * that are only arriving because we are the default pool 5086 */ 5087 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350)) 5088 return; 5089 5090 /* set UTA to appropriate mode */ 5091 igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE)); 5092 5093 vmolr |= rd32(E1000_VMOLR(vfn)) & 5094 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); 5095 5096 /* enable Rx jumbo frames, restrict as needed to support build_skb */ 5097 vmolr &= ~E1000_VMOLR_RLPML_MASK; 5098 #if (PAGE_SIZE < 8192) 5099 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) 5100 vmolr |= IGB_MAX_FRAME_BUILD_SKB; 5101 else 5102 #endif 5103 vmolr |= MAX_JUMBO_FRAME_SIZE; 5104 vmolr |= E1000_VMOLR_LPE; 5105 5106 wr32(E1000_VMOLR(vfn), vmolr); 5107 5108 igb_restore_vf_multicasts(adapter); 5109 } 5110 5111 static void igb_check_wvbr(struct igb_adapter *adapter) 5112 { 5113 struct e1000_hw *hw = &adapter->hw; 5114 u32 wvbr = 0; 5115 5116 switch (hw->mac.type) { 5117 case e1000_82576: 5118 case e1000_i350: 5119 wvbr = rd32(E1000_WVBR); 5120 if (!wvbr) 5121 return; 5122 break; 5123 default: 5124 break; 5125 } 5126 5127 adapter->wvbr |= wvbr; 5128 } 5129 5130 #define IGB_STAGGERED_QUEUE_OFFSET 8 5131 5132 static void igb_spoof_check(struct igb_adapter *adapter) 5133 { 5134 int j; 5135 5136 if (!adapter->wvbr) 5137 return; 5138 5139 for (j = 0; j < adapter->vfs_allocated_count; j++) { 5140 if (adapter->wvbr & BIT(j) || 5141 adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) { 5142 dev_warn(&adapter->pdev->dev, 5143 "Spoof event(s) detected on VF %d\n", j); 5144 adapter->wvbr &= 5145 ~(BIT(j) | 5146 BIT(j + IGB_STAGGERED_QUEUE_OFFSET)); 5147 } 5148 } 5149 } 5150 5151 /* Need to wait a few seconds after link up to get diagnostic information from 5152 * the phy 5153 */ 5154 static void igb_update_phy_info(struct timer_list *t) 5155 { 5156 struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer); 5157 igb_get_phy_info(&adapter->hw); 5158 } 5159 5160 /** 5161 * igb_has_link - check shared code for link and determine up/down 5162 * @adapter: pointer to driver private info 5163 **/ 5164 bool igb_has_link(struct igb_adapter *adapter) 5165 { 5166 struct e1000_hw *hw = &adapter->hw; 5167 bool link_active = false; 5168 5169 /* get_link_status is set on LSC (link status) interrupt or 5170 * rx sequence error interrupt. get_link_status will stay 5171 * false until the e1000_check_for_link establishes link 5172 * for copper adapters ONLY 5173 */ 5174 switch (hw->phy.media_type) { 5175 case e1000_media_type_copper: 5176 if (!hw->mac.get_link_status) 5177 return true; 5178 /* fall through */ 5179 case e1000_media_type_internal_serdes: 5180 hw->mac.ops.check_for_link(hw); 5181 link_active = !hw->mac.get_link_status; 5182 break; 5183 default: 5184 case e1000_media_type_unknown: 5185 break; 5186 } 5187 5188 if (((hw->mac.type == e1000_i210) || 5189 (hw->mac.type == e1000_i211)) && 5190 (hw->phy.id == I210_I_PHY_ID)) { 5191 if (!netif_carrier_ok(adapter->netdev)) { 5192 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; 5193 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) { 5194 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE; 5195 adapter->link_check_timeout = jiffies; 5196 } 5197 } 5198 5199 return link_active; 5200 } 5201 5202 static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event) 5203 { 5204 bool ret = false; 5205 u32 ctrl_ext, thstat; 5206 5207 /* check for thermal sensor event on i350 copper only */ 5208 if (hw->mac.type == e1000_i350) { 5209 thstat = rd32(E1000_THSTAT); 5210 ctrl_ext = rd32(E1000_CTRL_EXT); 5211 5212 if ((hw->phy.media_type == e1000_media_type_copper) && 5213 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) 5214 ret = !!(thstat & event); 5215 } 5216 5217 return ret; 5218 } 5219 5220 /** 5221 * igb_check_lvmmc - check for malformed packets received 5222 * and indicated in LVMMC register 5223 * @adapter: pointer to adapter 5224 **/ 5225 static void igb_check_lvmmc(struct igb_adapter *adapter) 5226 { 5227 struct e1000_hw *hw = &adapter->hw; 5228 u32 lvmmc; 5229 5230 lvmmc = rd32(E1000_LVMMC); 5231 if (lvmmc) { 5232 if (unlikely(net_ratelimit())) { 5233 netdev_warn(adapter->netdev, 5234 "malformed Tx packet detected and dropped, LVMMC:0x%08x\n", 5235 lvmmc); 5236 } 5237 } 5238 } 5239 5240 /** 5241 * igb_watchdog - Timer Call-back 5242 * @data: pointer to adapter cast into an unsigned long 5243 **/ 5244 static void igb_watchdog(struct timer_list *t) 5245 { 5246 struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer); 5247 /* Do the rest outside of interrupt context */ 5248 schedule_work(&adapter->watchdog_task); 5249 } 5250 5251 static void igb_watchdog_task(struct work_struct *work) 5252 { 5253 struct igb_adapter *adapter = container_of(work, 5254 struct igb_adapter, 5255 watchdog_task); 5256 struct e1000_hw *hw = &adapter->hw; 5257 struct e1000_phy_info *phy = &hw->phy; 5258 struct net_device *netdev = adapter->netdev; 5259 u32 link; 5260 int i; 5261 u32 connsw; 5262 u16 phy_data, retry_count = 20; 5263 5264 link = igb_has_link(adapter); 5265 5266 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { 5267 if (time_after(jiffies, (adapter->link_check_timeout + HZ))) 5268 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; 5269 else 5270 link = false; 5271 } 5272 5273 /* Force link down if we have fiber to swap to */ 5274 if (adapter->flags & IGB_FLAG_MAS_ENABLE) { 5275 if (hw->phy.media_type == e1000_media_type_copper) { 5276 connsw = rd32(E1000_CONNSW); 5277 if (!(connsw & E1000_CONNSW_AUTOSENSE_EN)) 5278 link = 0; 5279 } 5280 } 5281 if (link) { 5282 /* Perform a reset if the media type changed. */ 5283 if (hw->dev_spec._82575.media_changed) { 5284 hw->dev_spec._82575.media_changed = false; 5285 adapter->flags |= IGB_FLAG_MEDIA_RESET; 5286 igb_reset(adapter); 5287 } 5288 /* Cancel scheduled suspend requests. */ 5289 pm_runtime_resume(netdev->dev.parent); 5290 5291 if (!netif_carrier_ok(netdev)) { 5292 u32 ctrl; 5293 5294 hw->mac.ops.get_speed_and_duplex(hw, 5295 &adapter->link_speed, 5296 &adapter->link_duplex); 5297 5298 ctrl = rd32(E1000_CTRL); 5299 /* Links status message must follow this format */ 5300 netdev_info(netdev, 5301 "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", 5302 netdev->name, 5303 adapter->link_speed, 5304 adapter->link_duplex == FULL_DUPLEX ? 5305 "Full" : "Half", 5306 (ctrl & E1000_CTRL_TFCE) && 5307 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" : 5308 (ctrl & E1000_CTRL_RFCE) ? "RX" : 5309 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None"); 5310 5311 /* disable EEE if enabled */ 5312 if ((adapter->flags & IGB_FLAG_EEE) && 5313 (adapter->link_duplex == HALF_DUPLEX)) { 5314 dev_info(&adapter->pdev->dev, 5315 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n"); 5316 adapter->hw.dev_spec._82575.eee_disable = true; 5317 adapter->flags &= ~IGB_FLAG_EEE; 5318 } 5319 5320 /* check if SmartSpeed worked */ 5321 igb_check_downshift(hw); 5322 if (phy->speed_downgraded) 5323 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); 5324 5325 /* check for thermal sensor event */ 5326 if (igb_thermal_sensor_event(hw, 5327 E1000_THSTAT_LINK_THROTTLE)) 5328 netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n"); 5329 5330 /* adjust timeout factor according to speed/duplex */ 5331 adapter->tx_timeout_factor = 1; 5332 switch (adapter->link_speed) { 5333 case SPEED_10: 5334 adapter->tx_timeout_factor = 14; 5335 break; 5336 case SPEED_100: 5337 /* maybe add some timeout factor ? */ 5338 break; 5339 } 5340 5341 if (adapter->link_speed != SPEED_1000) 5342 goto no_wait; 5343 5344 /* wait for Remote receiver status OK */ 5345 retry_read_status: 5346 if (!igb_read_phy_reg(hw, PHY_1000T_STATUS, 5347 &phy_data)) { 5348 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && 5349 retry_count) { 5350 msleep(100); 5351 retry_count--; 5352 goto retry_read_status; 5353 } else if (!retry_count) { 5354 dev_err(&adapter->pdev->dev, "exceed max 2 second\n"); 5355 } 5356 } else { 5357 dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n"); 5358 } 5359 no_wait: 5360 netif_carrier_on(netdev); 5361 5362 igb_ping_all_vfs(adapter); 5363 igb_check_vf_rate_limit(adapter); 5364 5365 /* link state has changed, schedule phy info update */ 5366 if (!test_bit(__IGB_DOWN, &adapter->state)) 5367 mod_timer(&adapter->phy_info_timer, 5368 round_jiffies(jiffies + 2 * HZ)); 5369 } 5370 } else { 5371 if (netif_carrier_ok(netdev)) { 5372 adapter->link_speed = 0; 5373 adapter->link_duplex = 0; 5374 5375 /* check for thermal sensor event */ 5376 if (igb_thermal_sensor_event(hw, 5377 E1000_THSTAT_PWR_DOWN)) { 5378 netdev_err(netdev, "The network adapter was stopped because it overheated\n"); 5379 } 5380 5381 /* Links status message must follow this format */ 5382 netdev_info(netdev, "igb: %s NIC Link is Down\n", 5383 netdev->name); 5384 netif_carrier_off(netdev); 5385 5386 igb_ping_all_vfs(adapter); 5387 5388 /* link state has changed, schedule phy info update */ 5389 if (!test_bit(__IGB_DOWN, &adapter->state)) 5390 mod_timer(&adapter->phy_info_timer, 5391 round_jiffies(jiffies + 2 * HZ)); 5392 5393 /* link is down, time to check for alternate media */ 5394 if (adapter->flags & IGB_FLAG_MAS_ENABLE) { 5395 igb_check_swap_media(adapter); 5396 if (adapter->flags & IGB_FLAG_MEDIA_RESET) { 5397 schedule_work(&adapter->reset_task); 5398 /* return immediately */ 5399 return; 5400 } 5401 } 5402 pm_schedule_suspend(netdev->dev.parent, 5403 MSEC_PER_SEC * 5); 5404 5405 /* also check for alternate media here */ 5406 } else if (!netif_carrier_ok(netdev) && 5407 (adapter->flags & IGB_FLAG_MAS_ENABLE)) { 5408 igb_check_swap_media(adapter); 5409 if (adapter->flags & IGB_FLAG_MEDIA_RESET) { 5410 schedule_work(&adapter->reset_task); 5411 /* return immediately */ 5412 return; 5413 } 5414 } 5415 } 5416 5417 spin_lock(&adapter->stats64_lock); 5418 igb_update_stats(adapter); 5419 spin_unlock(&adapter->stats64_lock); 5420 5421 for (i = 0; i < adapter->num_tx_queues; i++) { 5422 struct igb_ring *tx_ring = adapter->tx_ring[i]; 5423 if (!netif_carrier_ok(netdev)) { 5424 /* We've lost link, so the controller stops DMA, 5425 * but we've got queued Tx work that's never going 5426 * to get done, so reset controller to flush Tx. 5427 * (Do the reset outside of interrupt context). 5428 */ 5429 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { 5430 adapter->tx_timeout_count++; 5431 schedule_work(&adapter->reset_task); 5432 /* return immediately since reset is imminent */ 5433 return; 5434 } 5435 } 5436 5437 /* Force detection of hung controller every watchdog period */ 5438 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 5439 } 5440 5441 /* Cause software interrupt to ensure Rx ring is cleaned */ 5442 if (adapter->flags & IGB_FLAG_HAS_MSIX) { 5443 u32 eics = 0; 5444 5445 for (i = 0; i < adapter->num_q_vectors; i++) 5446 eics |= adapter->q_vector[i]->eims_value; 5447 wr32(E1000_EICS, eics); 5448 } else { 5449 wr32(E1000_ICS, E1000_ICS_RXDMT0); 5450 } 5451 5452 igb_spoof_check(adapter); 5453 igb_ptp_rx_hang(adapter); 5454 igb_ptp_tx_hang(adapter); 5455 5456 /* Check LVMMC register on i350/i354 only */ 5457 if ((adapter->hw.mac.type == e1000_i350) || 5458 (adapter->hw.mac.type == e1000_i354)) 5459 igb_check_lvmmc(adapter); 5460 5461 /* Reset the timer */ 5462 if (!test_bit(__IGB_DOWN, &adapter->state)) { 5463 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) 5464 mod_timer(&adapter->watchdog_timer, 5465 round_jiffies(jiffies + HZ)); 5466 else 5467 mod_timer(&adapter->watchdog_timer, 5468 round_jiffies(jiffies + 2 * HZ)); 5469 } 5470 } 5471 5472 enum latency_range { 5473 lowest_latency = 0, 5474 low_latency = 1, 5475 bulk_latency = 2, 5476 latency_invalid = 255 5477 }; 5478 5479 /** 5480 * igb_update_ring_itr - update the dynamic ITR value based on packet size 5481 * @q_vector: pointer to q_vector 5482 * 5483 * Stores a new ITR value based on strictly on packet size. This 5484 * algorithm is less sophisticated than that used in igb_update_itr, 5485 * due to the difficulty of synchronizing statistics across multiple 5486 * receive rings. The divisors and thresholds used by this function 5487 * were determined based on theoretical maximum wire speed and testing 5488 * data, in order to minimize response time while increasing bulk 5489 * throughput. 5490 * This functionality is controlled by ethtool's coalescing settings. 5491 * NOTE: This function is called only when operating in a multiqueue 5492 * receive environment. 5493 **/ 5494 static void igb_update_ring_itr(struct igb_q_vector *q_vector) 5495 { 5496 int new_val = q_vector->itr_val; 5497 int avg_wire_size = 0; 5498 struct igb_adapter *adapter = q_vector->adapter; 5499 unsigned int packets; 5500 5501 /* For non-gigabit speeds, just fix the interrupt rate at 4000 5502 * ints/sec - ITR timer value of 120 ticks. 5503 */ 5504 if (adapter->link_speed != SPEED_1000) { 5505 new_val = IGB_4K_ITR; 5506 goto set_itr_val; 5507 } 5508 5509 packets = q_vector->rx.total_packets; 5510 if (packets) 5511 avg_wire_size = q_vector->rx.total_bytes / packets; 5512 5513 packets = q_vector->tx.total_packets; 5514 if (packets) 5515 avg_wire_size = max_t(u32, avg_wire_size, 5516 q_vector->tx.total_bytes / packets); 5517 5518 /* if avg_wire_size isn't set no work was done */ 5519 if (!avg_wire_size) 5520 goto clear_counts; 5521 5522 /* Add 24 bytes to size to account for CRC, preamble, and gap */ 5523 avg_wire_size += 24; 5524 5525 /* Don't starve jumbo frames */ 5526 avg_wire_size = min(avg_wire_size, 3000); 5527 5528 /* Give a little boost to mid-size frames */ 5529 if ((avg_wire_size > 300) && (avg_wire_size < 1200)) 5530 new_val = avg_wire_size / 3; 5531 else 5532 new_val = avg_wire_size / 2; 5533 5534 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 5535 if (new_val < IGB_20K_ITR && 5536 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 5537 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 5538 new_val = IGB_20K_ITR; 5539 5540 set_itr_val: 5541 if (new_val != q_vector->itr_val) { 5542 q_vector->itr_val = new_val; 5543 q_vector->set_itr = 1; 5544 } 5545 clear_counts: 5546 q_vector->rx.total_bytes = 0; 5547 q_vector->rx.total_packets = 0; 5548 q_vector->tx.total_bytes = 0; 5549 q_vector->tx.total_packets = 0; 5550 } 5551 5552 /** 5553 * igb_update_itr - update the dynamic ITR value based on statistics 5554 * @q_vector: pointer to q_vector 5555 * @ring_container: ring info to update the itr for 5556 * 5557 * Stores a new ITR value based on packets and byte 5558 * counts during the last interrupt. The advantage of per interrupt 5559 * computation is faster updates and more accurate ITR for the current 5560 * traffic pattern. Constants in this function were computed 5561 * based on theoretical maximum wire speed and thresholds were set based 5562 * on testing data as well as attempting to minimize response time 5563 * while increasing bulk throughput. 5564 * This functionality is controlled by ethtool's coalescing settings. 5565 * NOTE: These calculations are only valid when operating in a single- 5566 * queue environment. 5567 **/ 5568 static void igb_update_itr(struct igb_q_vector *q_vector, 5569 struct igb_ring_container *ring_container) 5570 { 5571 unsigned int packets = ring_container->total_packets; 5572 unsigned int bytes = ring_container->total_bytes; 5573 u8 itrval = ring_container->itr; 5574 5575 /* no packets, exit with status unchanged */ 5576 if (packets == 0) 5577 return; 5578 5579 switch (itrval) { 5580 case lowest_latency: 5581 /* handle TSO and jumbo frames */ 5582 if (bytes/packets > 8000) 5583 itrval = bulk_latency; 5584 else if ((packets < 5) && (bytes > 512)) 5585 itrval = low_latency; 5586 break; 5587 case low_latency: /* 50 usec aka 20000 ints/s */ 5588 if (bytes > 10000) { 5589 /* this if handles the TSO accounting */ 5590 if (bytes/packets > 8000) 5591 itrval = bulk_latency; 5592 else if ((packets < 10) || ((bytes/packets) > 1200)) 5593 itrval = bulk_latency; 5594 else if ((packets > 35)) 5595 itrval = lowest_latency; 5596 } else if (bytes/packets > 2000) { 5597 itrval = bulk_latency; 5598 } else if (packets <= 2 && bytes < 512) { 5599 itrval = lowest_latency; 5600 } 5601 break; 5602 case bulk_latency: /* 250 usec aka 4000 ints/s */ 5603 if (bytes > 25000) { 5604 if (packets > 35) 5605 itrval = low_latency; 5606 } else if (bytes < 1500) { 5607 itrval = low_latency; 5608 } 5609 break; 5610 } 5611 5612 /* clear work counters since we have the values we need */ 5613 ring_container->total_bytes = 0; 5614 ring_container->total_packets = 0; 5615 5616 /* write updated itr to ring container */ 5617 ring_container->itr = itrval; 5618 } 5619 5620 static void igb_set_itr(struct igb_q_vector *q_vector) 5621 { 5622 struct igb_adapter *adapter = q_vector->adapter; 5623 u32 new_itr = q_vector->itr_val; 5624 u8 current_itr = 0; 5625 5626 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 5627 if (adapter->link_speed != SPEED_1000) { 5628 current_itr = 0; 5629 new_itr = IGB_4K_ITR; 5630 goto set_itr_now; 5631 } 5632 5633 igb_update_itr(q_vector, &q_vector->tx); 5634 igb_update_itr(q_vector, &q_vector->rx); 5635 5636 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 5637 5638 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 5639 if (current_itr == lowest_latency && 5640 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 5641 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 5642 current_itr = low_latency; 5643 5644 switch (current_itr) { 5645 /* counts and packets in update_itr are dependent on these numbers */ 5646 case lowest_latency: 5647 new_itr = IGB_70K_ITR; /* 70,000 ints/sec */ 5648 break; 5649 case low_latency: 5650 new_itr = IGB_20K_ITR; /* 20,000 ints/sec */ 5651 break; 5652 case bulk_latency: 5653 new_itr = IGB_4K_ITR; /* 4,000 ints/sec */ 5654 break; 5655 default: 5656 break; 5657 } 5658 5659 set_itr_now: 5660 if (new_itr != q_vector->itr_val) { 5661 /* this attempts to bias the interrupt rate towards Bulk 5662 * by adding intermediate steps when interrupt rate is 5663 * increasing 5664 */ 5665 new_itr = new_itr > q_vector->itr_val ? 5666 max((new_itr * q_vector->itr_val) / 5667 (new_itr + (q_vector->itr_val >> 2)), 5668 new_itr) : new_itr; 5669 /* Don't write the value here; it resets the adapter's 5670 * internal timer, and causes us to delay far longer than 5671 * we should between interrupts. Instead, we write the ITR 5672 * value at the beginning of the next interrupt so the timing 5673 * ends up being correct. 5674 */ 5675 q_vector->itr_val = new_itr; 5676 q_vector->set_itr = 1; 5677 } 5678 } 5679 5680 static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, 5681 struct igb_tx_buffer *first, 5682 u32 vlan_macip_lens, u32 type_tucmd, 5683 u32 mss_l4len_idx) 5684 { 5685 struct e1000_adv_tx_context_desc *context_desc; 5686 u16 i = tx_ring->next_to_use; 5687 struct timespec64 ts; 5688 5689 context_desc = IGB_TX_CTXTDESC(tx_ring, i); 5690 5691 i++; 5692 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 5693 5694 /* set bits to identify this as an advanced context descriptor */ 5695 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT; 5696 5697 /* For 82575, context index must be unique per ring. */ 5698 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) 5699 mss_l4len_idx |= tx_ring->reg_idx << 4; 5700 5701 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 5702 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); 5703 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 5704 5705 /* We assume there is always a valid tx time available. Invalid times 5706 * should have been handled by the upper layers. 5707 */ 5708 if (tx_ring->launchtime_enable) { 5709 ts = ns_to_timespec64(first->skb->tstamp); 5710 context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); 5711 } else { 5712 context_desc->seqnum_seed = 0; 5713 } 5714 } 5715 5716 static int igb_tso(struct igb_ring *tx_ring, 5717 struct igb_tx_buffer *first, 5718 u8 *hdr_len) 5719 { 5720 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; 5721 struct sk_buff *skb = first->skb; 5722 union { 5723 struct iphdr *v4; 5724 struct ipv6hdr *v6; 5725 unsigned char *hdr; 5726 } ip; 5727 union { 5728 struct tcphdr *tcp; 5729 unsigned char *hdr; 5730 } l4; 5731 u32 paylen, l4_offset; 5732 int err; 5733 5734 if (skb->ip_summed != CHECKSUM_PARTIAL) 5735 return 0; 5736 5737 if (!skb_is_gso(skb)) 5738 return 0; 5739 5740 err = skb_cow_head(skb, 0); 5741 if (err < 0) 5742 return err; 5743 5744 ip.hdr = skb_network_header(skb); 5745 l4.hdr = skb_checksum_start(skb); 5746 5747 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 5748 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; 5749 5750 /* initialize outer IP header fields */ 5751 if (ip.v4->version == 4) { 5752 unsigned char *csum_start = skb_checksum_start(skb); 5753 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); 5754 5755 /* IP header will have to cancel out any data that 5756 * is not a part of the outer IP header 5757 */ 5758 ip.v4->check = csum_fold(csum_partial(trans_start, 5759 csum_start - trans_start, 5760 0)); 5761 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; 5762 5763 ip.v4->tot_len = 0; 5764 first->tx_flags |= IGB_TX_FLAGS_TSO | 5765 IGB_TX_FLAGS_CSUM | 5766 IGB_TX_FLAGS_IPV4; 5767 } else { 5768 ip.v6->payload_len = 0; 5769 first->tx_flags |= IGB_TX_FLAGS_TSO | 5770 IGB_TX_FLAGS_CSUM; 5771 } 5772 5773 /* determine offset of inner transport header */ 5774 l4_offset = l4.hdr - skb->data; 5775 5776 /* compute length of segmentation header */ 5777 *hdr_len = (l4.tcp->doff * 4) + l4_offset; 5778 5779 /* remove payload length from inner checksum */ 5780 paylen = skb->len - l4_offset; 5781 csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); 5782 5783 /* update gso size and bytecount with header size */ 5784 first->gso_segs = skb_shinfo(skb)->gso_segs; 5785 first->bytecount += (first->gso_segs - 1) * *hdr_len; 5786 5787 /* MSS L4LEN IDX */ 5788 mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT; 5789 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; 5790 5791 /* VLAN MACLEN IPLEN */ 5792 vlan_macip_lens = l4.hdr - ip.hdr; 5793 vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; 5794 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; 5795 5796 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, 5797 type_tucmd, mss_l4len_idx); 5798 5799 return 1; 5800 } 5801 5802 static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb) 5803 { 5804 unsigned int offset = 0; 5805 5806 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); 5807 5808 return offset == skb_checksum_start_offset(skb); 5809 } 5810 5811 static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) 5812 { 5813 struct sk_buff *skb = first->skb; 5814 u32 vlan_macip_lens = 0; 5815 u32 type_tucmd = 0; 5816 5817 if (skb->ip_summed != CHECKSUM_PARTIAL) { 5818 csum_failed: 5819 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN)) 5820 return; 5821 goto no_csum; 5822 } 5823 5824 switch (skb->csum_offset) { 5825 case offsetof(struct tcphdr, check): 5826 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; 5827 /* fall through */ 5828 case offsetof(struct udphdr, check): 5829 break; 5830 case offsetof(struct sctphdr, checksum): 5831 /* validate that this is actually an SCTP request */ 5832 if (((first->protocol == htons(ETH_P_IP)) && 5833 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || 5834 ((first->protocol == htons(ETH_P_IPV6)) && 5835 igb_ipv6_csum_is_sctp(skb))) { 5836 type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP; 5837 break; 5838 } 5839 /* fall through */ 5840 default: 5841 skb_checksum_help(skb); 5842 goto csum_failed; 5843 } 5844 5845 /* update TX checksum flag */ 5846 first->tx_flags |= IGB_TX_FLAGS_CSUM; 5847 vlan_macip_lens = skb_checksum_start_offset(skb) - 5848 skb_network_offset(skb); 5849 no_csum: 5850 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; 5851 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; 5852 5853 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); 5854 } 5855 5856 #define IGB_SET_FLAG(_input, _flag, _result) \ 5857 ((_flag <= _result) ? \ 5858 ((u32)(_input & _flag) * (_result / _flag)) : \ 5859 ((u32)(_input & _flag) / (_flag / _result))) 5860 5861 static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) 5862 { 5863 /* set type for advanced descriptor with frame checksum insertion */ 5864 u32 cmd_type = E1000_ADVTXD_DTYP_DATA | 5865 E1000_ADVTXD_DCMD_DEXT | 5866 E1000_ADVTXD_DCMD_IFCS; 5867 5868 /* set HW vlan bit if vlan is present */ 5869 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN, 5870 (E1000_ADVTXD_DCMD_VLE)); 5871 5872 /* set segmentation bits for TSO */ 5873 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO, 5874 (E1000_ADVTXD_DCMD_TSE)); 5875 5876 /* set timestamp bit if present */ 5877 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP, 5878 (E1000_ADVTXD_MAC_TSTAMP)); 5879 5880 /* insert frame checksum */ 5881 cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS); 5882 5883 return cmd_type; 5884 } 5885 5886 static void igb_tx_olinfo_status(struct igb_ring *tx_ring, 5887 union e1000_adv_tx_desc *tx_desc, 5888 u32 tx_flags, unsigned int paylen) 5889 { 5890 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT; 5891 5892 /* 82575 requires a unique index per ring */ 5893 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) 5894 olinfo_status |= tx_ring->reg_idx << 4; 5895 5896 /* insert L4 checksum */ 5897 olinfo_status |= IGB_SET_FLAG(tx_flags, 5898 IGB_TX_FLAGS_CSUM, 5899 (E1000_TXD_POPTS_TXSM << 8)); 5900 5901 /* insert IPv4 checksum */ 5902 olinfo_status |= IGB_SET_FLAG(tx_flags, 5903 IGB_TX_FLAGS_IPV4, 5904 (E1000_TXD_POPTS_IXSM << 8)); 5905 5906 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 5907 } 5908 5909 static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) 5910 { 5911 struct net_device *netdev = tx_ring->netdev; 5912 5913 netif_stop_subqueue(netdev, tx_ring->queue_index); 5914 5915 /* Herbert's original patch had: 5916 * smp_mb__after_netif_stop_queue(); 5917 * but since that doesn't exist yet, just open code it. 5918 */ 5919 smp_mb(); 5920 5921 /* We need to check again in a case another CPU has just 5922 * made room available. 5923 */ 5924 if (igb_desc_unused(tx_ring) < size) 5925 return -EBUSY; 5926 5927 /* A reprieve! */ 5928 netif_wake_subqueue(netdev, tx_ring->queue_index); 5929 5930 u64_stats_update_begin(&tx_ring->tx_syncp2); 5931 tx_ring->tx_stats.restart_queue2++; 5932 u64_stats_update_end(&tx_ring->tx_syncp2); 5933 5934 return 0; 5935 } 5936 5937 static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) 5938 { 5939 if (igb_desc_unused(tx_ring) >= size) 5940 return 0; 5941 return __igb_maybe_stop_tx(tx_ring, size); 5942 } 5943 5944 static int igb_tx_map(struct igb_ring *tx_ring, 5945 struct igb_tx_buffer *first, 5946 const u8 hdr_len) 5947 { 5948 struct sk_buff *skb = first->skb; 5949 struct igb_tx_buffer *tx_buffer; 5950 union e1000_adv_tx_desc *tx_desc; 5951 struct skb_frag_struct *frag; 5952 dma_addr_t dma; 5953 unsigned int data_len, size; 5954 u32 tx_flags = first->tx_flags; 5955 u32 cmd_type = igb_tx_cmd_type(skb, tx_flags); 5956 u16 i = tx_ring->next_to_use; 5957 5958 tx_desc = IGB_TX_DESC(tx_ring, i); 5959 5960 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); 5961 5962 size = skb_headlen(skb); 5963 data_len = skb->data_len; 5964 5965 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 5966 5967 tx_buffer = first; 5968 5969 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 5970 if (dma_mapping_error(tx_ring->dev, dma)) 5971 goto dma_error; 5972 5973 /* record length, and DMA address */ 5974 dma_unmap_len_set(tx_buffer, len, size); 5975 dma_unmap_addr_set(tx_buffer, dma, dma); 5976 5977 tx_desc->read.buffer_addr = cpu_to_le64(dma); 5978 5979 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) { 5980 tx_desc->read.cmd_type_len = 5981 cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD); 5982 5983 i++; 5984 tx_desc++; 5985 if (i == tx_ring->count) { 5986 tx_desc = IGB_TX_DESC(tx_ring, 0); 5987 i = 0; 5988 } 5989 tx_desc->read.olinfo_status = 0; 5990 5991 dma += IGB_MAX_DATA_PER_TXD; 5992 size -= IGB_MAX_DATA_PER_TXD; 5993 5994 tx_desc->read.buffer_addr = cpu_to_le64(dma); 5995 } 5996 5997 if (likely(!data_len)) 5998 break; 5999 6000 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); 6001 6002 i++; 6003 tx_desc++; 6004 if (i == tx_ring->count) { 6005 tx_desc = IGB_TX_DESC(tx_ring, 0); 6006 i = 0; 6007 } 6008 tx_desc->read.olinfo_status = 0; 6009 6010 size = skb_frag_size(frag); 6011 data_len -= size; 6012 6013 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, 6014 size, DMA_TO_DEVICE); 6015 6016 tx_buffer = &tx_ring->tx_buffer_info[i]; 6017 } 6018 6019 /* write last descriptor with RS and EOP bits */ 6020 cmd_type |= size | IGB_TXD_DCMD; 6021 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); 6022 6023 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 6024 6025 /* set the timestamp */ 6026 first->time_stamp = jiffies; 6027 6028 /* Force memory writes to complete before letting h/w know there 6029 * are new descriptors to fetch. (Only applicable for weak-ordered 6030 * memory model archs, such as IA-64). 6031 * 6032 * We also need this memory barrier to make certain all of the 6033 * status bits have been updated before next_to_watch is written. 6034 */ 6035 dma_wmb(); 6036 6037 /* set next_to_watch value indicating a packet is present */ 6038 first->next_to_watch = tx_desc; 6039 6040 i++; 6041 if (i == tx_ring->count) 6042 i = 0; 6043 6044 tx_ring->next_to_use = i; 6045 6046 /* Make sure there is space in the ring for the next send. */ 6047 igb_maybe_stop_tx(tx_ring, DESC_NEEDED); 6048 6049 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { 6050 writel(i, tx_ring->tail); 6051 6052 /* we need this if more than one processor can write to our tail 6053 * at a time, it synchronizes IO on IA64/Altix systems 6054 */ 6055 mmiowb(); 6056 } 6057 return 0; 6058 6059 dma_error: 6060 dev_err(tx_ring->dev, "TX DMA map failed\n"); 6061 tx_buffer = &tx_ring->tx_buffer_info[i]; 6062 6063 /* clear dma mappings for failed tx_buffer_info map */ 6064 while (tx_buffer != first) { 6065 if (dma_unmap_len(tx_buffer, len)) 6066 dma_unmap_page(tx_ring->dev, 6067 dma_unmap_addr(tx_buffer, dma), 6068 dma_unmap_len(tx_buffer, len), 6069 DMA_TO_DEVICE); 6070 dma_unmap_len_set(tx_buffer, len, 0); 6071 6072 if (i-- == 0) 6073 i += tx_ring->count; 6074 tx_buffer = &tx_ring->tx_buffer_info[i]; 6075 } 6076 6077 if (dma_unmap_len(tx_buffer, len)) 6078 dma_unmap_single(tx_ring->dev, 6079 dma_unmap_addr(tx_buffer, dma), 6080 dma_unmap_len(tx_buffer, len), 6081 DMA_TO_DEVICE); 6082 dma_unmap_len_set(tx_buffer, len, 0); 6083 6084 dev_kfree_skb_any(tx_buffer->skb); 6085 tx_buffer->skb = NULL; 6086 6087 tx_ring->next_to_use = i; 6088 6089 return -1; 6090 } 6091 6092 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, 6093 struct igb_ring *tx_ring) 6094 { 6095 struct igb_tx_buffer *first; 6096 int tso; 6097 u32 tx_flags = 0; 6098 unsigned short f; 6099 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 6100 __be16 protocol = vlan_get_protocol(skb); 6101 u8 hdr_len = 0; 6102 6103 /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD, 6104 * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD, 6105 * + 2 desc gap to keep tail from touching head, 6106 * + 1 desc for context descriptor, 6107 * otherwise try next time 6108 */ 6109 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 6110 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 6111 6112 if (igb_maybe_stop_tx(tx_ring, count + 3)) { 6113 /* this is a hard error */ 6114 return NETDEV_TX_BUSY; 6115 } 6116 6117 /* record the location of the first descriptor for this packet */ 6118 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; 6119 first->skb = skb; 6120 first->bytecount = skb->len; 6121 first->gso_segs = 1; 6122 6123 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 6124 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); 6125 6126 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && 6127 !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, 6128 &adapter->state)) { 6129 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 6130 tx_flags |= IGB_TX_FLAGS_TSTAMP; 6131 6132 adapter->ptp_tx_skb = skb_get(skb); 6133 adapter->ptp_tx_start = jiffies; 6134 if (adapter->hw.mac.type == e1000_82576) 6135 schedule_work(&adapter->ptp_tx_work); 6136 } else { 6137 adapter->tx_hwtstamp_skipped++; 6138 } 6139 } 6140 6141 if (skb_vlan_tag_present(skb)) { 6142 tx_flags |= IGB_TX_FLAGS_VLAN; 6143 tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); 6144 } 6145 6146 /* record initial flags and protocol */ 6147 first->tx_flags = tx_flags; 6148 first->protocol = protocol; 6149 6150 tso = igb_tso(tx_ring, first, &hdr_len); 6151 if (tso < 0) 6152 goto out_drop; 6153 else if (!tso) 6154 igb_tx_csum(tx_ring, first); 6155 6156 skb_tx_timestamp(skb); 6157 6158 if (igb_tx_map(tx_ring, first, hdr_len)) 6159 goto cleanup_tx_tstamp; 6160 6161 return NETDEV_TX_OK; 6162 6163 out_drop: 6164 dev_kfree_skb_any(first->skb); 6165 first->skb = NULL; 6166 cleanup_tx_tstamp: 6167 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) { 6168 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); 6169 6170 dev_kfree_skb_any(adapter->ptp_tx_skb); 6171 adapter->ptp_tx_skb = NULL; 6172 if (adapter->hw.mac.type == e1000_82576) 6173 cancel_work_sync(&adapter->ptp_tx_work); 6174 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); 6175 } 6176 6177 return NETDEV_TX_OK; 6178 } 6179 6180 static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter, 6181 struct sk_buff *skb) 6182 { 6183 unsigned int r_idx = skb->queue_mapping; 6184 6185 if (r_idx >= adapter->num_tx_queues) 6186 r_idx = r_idx % adapter->num_tx_queues; 6187 6188 return adapter->tx_ring[r_idx]; 6189 } 6190 6191 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, 6192 struct net_device *netdev) 6193 { 6194 struct igb_adapter *adapter = netdev_priv(netdev); 6195 6196 /* The minimum packet size with TCTL.PSP set is 17 so pad the skb 6197 * in order to meet this minimum size requirement. 6198 */ 6199 if (skb_put_padto(skb, 17)) 6200 return NETDEV_TX_OK; 6201 6202 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); 6203 } 6204 6205 /** 6206 * igb_tx_timeout - Respond to a Tx Hang 6207 * @netdev: network interface device structure 6208 **/ 6209 static void igb_tx_timeout(struct net_device *netdev) 6210 { 6211 struct igb_adapter *adapter = netdev_priv(netdev); 6212 struct e1000_hw *hw = &adapter->hw; 6213 6214 /* Do the reset outside of interrupt context */ 6215 adapter->tx_timeout_count++; 6216 6217 if (hw->mac.type >= e1000_82580) 6218 hw->dev_spec._82575.global_device_reset = true; 6219 6220 schedule_work(&adapter->reset_task); 6221 wr32(E1000_EICS, 6222 (adapter->eims_enable_mask & ~adapter->eims_other)); 6223 } 6224 6225 static void igb_reset_task(struct work_struct *work) 6226 { 6227 struct igb_adapter *adapter; 6228 adapter = container_of(work, struct igb_adapter, reset_task); 6229 6230 igb_dump(adapter); 6231 netdev_err(adapter->netdev, "Reset adapter\n"); 6232 igb_reinit_locked(adapter); 6233 } 6234 6235 /** 6236 * igb_get_stats64 - Get System Network Statistics 6237 * @netdev: network interface device structure 6238 * @stats: rtnl_link_stats64 pointer 6239 **/ 6240 static void igb_get_stats64(struct net_device *netdev, 6241 struct rtnl_link_stats64 *stats) 6242 { 6243 struct igb_adapter *adapter = netdev_priv(netdev); 6244 6245 spin_lock(&adapter->stats64_lock); 6246 igb_update_stats(adapter); 6247 memcpy(stats, &adapter->stats64, sizeof(*stats)); 6248 spin_unlock(&adapter->stats64_lock); 6249 } 6250 6251 /** 6252 * igb_change_mtu - Change the Maximum Transfer Unit 6253 * @netdev: network interface device structure 6254 * @new_mtu: new value for maximum frame size 6255 * 6256 * Returns 0 on success, negative on failure 6257 **/ 6258 static int igb_change_mtu(struct net_device *netdev, int new_mtu) 6259 { 6260 struct igb_adapter *adapter = netdev_priv(netdev); 6261 struct pci_dev *pdev = adapter->pdev; 6262 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 6263 6264 /* adjust max frame to be at least the size of a standard frame */ 6265 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) 6266 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; 6267 6268 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 6269 usleep_range(1000, 2000); 6270 6271 /* igb_down has a dependency on max_frame_size */ 6272 adapter->max_frame_size = max_frame; 6273 6274 if (netif_running(netdev)) 6275 igb_down(adapter); 6276 6277 dev_info(&pdev->dev, "changing MTU from %d to %d\n", 6278 netdev->mtu, new_mtu); 6279 netdev->mtu = new_mtu; 6280 6281 if (netif_running(netdev)) 6282 igb_up(adapter); 6283 else 6284 igb_reset(adapter); 6285 6286 clear_bit(__IGB_RESETTING, &adapter->state); 6287 6288 return 0; 6289 } 6290 6291 /** 6292 * igb_update_stats - Update the board statistics counters 6293 * @adapter: board private structure 6294 **/ 6295 void igb_update_stats(struct igb_adapter *adapter) 6296 { 6297 struct rtnl_link_stats64 *net_stats = &adapter->stats64; 6298 struct e1000_hw *hw = &adapter->hw; 6299 struct pci_dev *pdev = adapter->pdev; 6300 u32 reg, mpc; 6301 int i; 6302 u64 bytes, packets; 6303 unsigned int start; 6304 u64 _bytes, _packets; 6305 6306 /* Prevent stats update while adapter is being reset, or if the pci 6307 * connection is down. 6308 */ 6309 if (adapter->link_speed == 0) 6310 return; 6311 if (pci_channel_offline(pdev)) 6312 return; 6313 6314 bytes = 0; 6315 packets = 0; 6316 6317 rcu_read_lock(); 6318 for (i = 0; i < adapter->num_rx_queues; i++) { 6319 struct igb_ring *ring = adapter->rx_ring[i]; 6320 u32 rqdpc = rd32(E1000_RQDPC(i)); 6321 if (hw->mac.type >= e1000_i210) 6322 wr32(E1000_RQDPC(i), 0); 6323 6324 if (rqdpc) { 6325 ring->rx_stats.drops += rqdpc; 6326 net_stats->rx_fifo_errors += rqdpc; 6327 } 6328 6329 do { 6330 start = u64_stats_fetch_begin_irq(&ring->rx_syncp); 6331 _bytes = ring->rx_stats.bytes; 6332 _packets = ring->rx_stats.packets; 6333 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); 6334 bytes += _bytes; 6335 packets += _packets; 6336 } 6337 6338 net_stats->rx_bytes = bytes; 6339 net_stats->rx_packets = packets; 6340 6341 bytes = 0; 6342 packets = 0; 6343 for (i = 0; i < adapter->num_tx_queues; i++) { 6344 struct igb_ring *ring = adapter->tx_ring[i]; 6345 do { 6346 start = u64_stats_fetch_begin_irq(&ring->tx_syncp); 6347 _bytes = ring->tx_stats.bytes; 6348 _packets = ring->tx_stats.packets; 6349 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); 6350 bytes += _bytes; 6351 packets += _packets; 6352 } 6353 net_stats->tx_bytes = bytes; 6354 net_stats->tx_packets = packets; 6355 rcu_read_unlock(); 6356 6357 /* read stats registers */ 6358 adapter->stats.crcerrs += rd32(E1000_CRCERRS); 6359 adapter->stats.gprc += rd32(E1000_GPRC); 6360 adapter->stats.gorc += rd32(E1000_GORCL); 6361 rd32(E1000_GORCH); /* clear GORCL */ 6362 adapter->stats.bprc += rd32(E1000_BPRC); 6363 adapter->stats.mprc += rd32(E1000_MPRC); 6364 adapter->stats.roc += rd32(E1000_ROC); 6365 6366 adapter->stats.prc64 += rd32(E1000_PRC64); 6367 adapter->stats.prc127 += rd32(E1000_PRC127); 6368 adapter->stats.prc255 += rd32(E1000_PRC255); 6369 adapter->stats.prc511 += rd32(E1000_PRC511); 6370 adapter->stats.prc1023 += rd32(E1000_PRC1023); 6371 adapter->stats.prc1522 += rd32(E1000_PRC1522); 6372 adapter->stats.symerrs += rd32(E1000_SYMERRS); 6373 adapter->stats.sec += rd32(E1000_SEC); 6374 6375 mpc = rd32(E1000_MPC); 6376 adapter->stats.mpc += mpc; 6377 net_stats->rx_fifo_errors += mpc; 6378 adapter->stats.scc += rd32(E1000_SCC); 6379 adapter->stats.ecol += rd32(E1000_ECOL); 6380 adapter->stats.mcc += rd32(E1000_MCC); 6381 adapter->stats.latecol += rd32(E1000_LATECOL); 6382 adapter->stats.dc += rd32(E1000_DC); 6383 adapter->stats.rlec += rd32(E1000_RLEC); 6384 adapter->stats.xonrxc += rd32(E1000_XONRXC); 6385 adapter->stats.xontxc += rd32(E1000_XONTXC); 6386 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC); 6387 adapter->stats.xofftxc += rd32(E1000_XOFFTXC); 6388 adapter->stats.fcruc += rd32(E1000_FCRUC); 6389 adapter->stats.gptc += rd32(E1000_GPTC); 6390 adapter->stats.gotc += rd32(E1000_GOTCL); 6391 rd32(E1000_GOTCH); /* clear GOTCL */ 6392 adapter->stats.rnbc += rd32(E1000_RNBC); 6393 adapter->stats.ruc += rd32(E1000_RUC); 6394 adapter->stats.rfc += rd32(E1000_RFC); 6395 adapter->stats.rjc += rd32(E1000_RJC); 6396 adapter->stats.tor += rd32(E1000_TORH); 6397 adapter->stats.tot += rd32(E1000_TOTH); 6398 adapter->stats.tpr += rd32(E1000_TPR); 6399 6400 adapter->stats.ptc64 += rd32(E1000_PTC64); 6401 adapter->stats.ptc127 += rd32(E1000_PTC127); 6402 adapter->stats.ptc255 += rd32(E1000_PTC255); 6403 adapter->stats.ptc511 += rd32(E1000_PTC511); 6404 adapter->stats.ptc1023 += rd32(E1000_PTC1023); 6405 adapter->stats.ptc1522 += rd32(E1000_PTC1522); 6406 6407 adapter->stats.mptc += rd32(E1000_MPTC); 6408 adapter->stats.bptc += rd32(E1000_BPTC); 6409 6410 adapter->stats.tpt += rd32(E1000_TPT); 6411 adapter->stats.colc += rd32(E1000_COLC); 6412 6413 adapter->stats.algnerrc += rd32(E1000_ALGNERRC); 6414 /* read internal phy specific stats */ 6415 reg = rd32(E1000_CTRL_EXT); 6416 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { 6417 adapter->stats.rxerrc += rd32(E1000_RXERRC); 6418 6419 /* this stat has invalid values on i210/i211 */ 6420 if ((hw->mac.type != e1000_i210) && 6421 (hw->mac.type != e1000_i211)) 6422 adapter->stats.tncrs += rd32(E1000_TNCRS); 6423 } 6424 6425 adapter->stats.tsctc += rd32(E1000_TSCTC); 6426 adapter->stats.tsctfc += rd32(E1000_TSCTFC); 6427 6428 adapter->stats.iac += rd32(E1000_IAC); 6429 adapter->stats.icrxoc += rd32(E1000_ICRXOC); 6430 adapter->stats.icrxptc += rd32(E1000_ICRXPTC); 6431 adapter->stats.icrxatc += rd32(E1000_ICRXATC); 6432 adapter->stats.ictxptc += rd32(E1000_ICTXPTC); 6433 adapter->stats.ictxatc += rd32(E1000_ICTXATC); 6434 adapter->stats.ictxqec += rd32(E1000_ICTXQEC); 6435 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC); 6436 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); 6437 6438 /* Fill out the OS statistics structure */ 6439 net_stats->multicast = adapter->stats.mprc; 6440 net_stats->collisions = adapter->stats.colc; 6441 6442 /* Rx Errors */ 6443 6444 /* RLEC on some newer hardware can be incorrect so build 6445 * our own version based on RUC and ROC 6446 */ 6447 net_stats->rx_errors = adapter->stats.rxerrc + 6448 adapter->stats.crcerrs + adapter->stats.algnerrc + 6449 adapter->stats.ruc + adapter->stats.roc + 6450 adapter->stats.cexterr; 6451 net_stats->rx_length_errors = adapter->stats.ruc + 6452 adapter->stats.roc; 6453 net_stats->rx_crc_errors = adapter->stats.crcerrs; 6454 net_stats->rx_frame_errors = adapter->stats.algnerrc; 6455 net_stats->rx_missed_errors = adapter->stats.mpc; 6456 6457 /* Tx Errors */ 6458 net_stats->tx_errors = adapter->stats.ecol + 6459 adapter->stats.latecol; 6460 net_stats->tx_aborted_errors = adapter->stats.ecol; 6461 net_stats->tx_window_errors = adapter->stats.latecol; 6462 net_stats->tx_carrier_errors = adapter->stats.tncrs; 6463 6464 /* Tx Dropped needs to be maintained elsewhere */ 6465 6466 /* Management Stats */ 6467 adapter->stats.mgptc += rd32(E1000_MGTPTC); 6468 adapter->stats.mgprc += rd32(E1000_MGTPRC); 6469 adapter->stats.mgpdc += rd32(E1000_MGTPDC); 6470 6471 /* OS2BMC Stats */ 6472 reg = rd32(E1000_MANC); 6473 if (reg & E1000_MANC_EN_BMC2OS) { 6474 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC); 6475 adapter->stats.o2bspc += rd32(E1000_O2BSPC); 6476 adapter->stats.b2ospc += rd32(E1000_B2OSPC); 6477 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC); 6478 } 6479 } 6480 6481 static void igb_tsync_interrupt(struct igb_adapter *adapter) 6482 { 6483 struct e1000_hw *hw = &adapter->hw; 6484 struct ptp_clock_event event; 6485 struct timespec64 ts; 6486 u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR); 6487 6488 if (tsicr & TSINTR_SYS_WRAP) { 6489 event.type = PTP_CLOCK_PPS; 6490 if (adapter->ptp_caps.pps) 6491 ptp_clock_event(adapter->ptp_clock, &event); 6492 ack |= TSINTR_SYS_WRAP; 6493 } 6494 6495 if (tsicr & E1000_TSICR_TXTS) { 6496 /* retrieve hardware timestamp */ 6497 schedule_work(&adapter->ptp_tx_work); 6498 ack |= E1000_TSICR_TXTS; 6499 } 6500 6501 if (tsicr & TSINTR_TT0) { 6502 spin_lock(&adapter->tmreg_lock); 6503 ts = timespec64_add(adapter->perout[0].start, 6504 adapter->perout[0].period); 6505 /* u32 conversion of tv_sec is safe until y2106 */ 6506 wr32(E1000_TRGTTIML0, ts.tv_nsec); 6507 wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec); 6508 tsauxc = rd32(E1000_TSAUXC); 6509 tsauxc |= TSAUXC_EN_TT0; 6510 wr32(E1000_TSAUXC, tsauxc); 6511 adapter->perout[0].start = ts; 6512 spin_unlock(&adapter->tmreg_lock); 6513 ack |= TSINTR_TT0; 6514 } 6515 6516 if (tsicr & TSINTR_TT1) { 6517 spin_lock(&adapter->tmreg_lock); 6518 ts = timespec64_add(adapter->perout[1].start, 6519 adapter->perout[1].period); 6520 wr32(E1000_TRGTTIML1, ts.tv_nsec); 6521 wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec); 6522 tsauxc = rd32(E1000_TSAUXC); 6523 tsauxc |= TSAUXC_EN_TT1; 6524 wr32(E1000_TSAUXC, tsauxc); 6525 adapter->perout[1].start = ts; 6526 spin_unlock(&adapter->tmreg_lock); 6527 ack |= TSINTR_TT1; 6528 } 6529 6530 if (tsicr & TSINTR_AUTT0) { 6531 nsec = rd32(E1000_AUXSTMPL0); 6532 sec = rd32(E1000_AUXSTMPH0); 6533 event.type = PTP_CLOCK_EXTTS; 6534 event.index = 0; 6535 event.timestamp = sec * 1000000000ULL + nsec; 6536 ptp_clock_event(adapter->ptp_clock, &event); 6537 ack |= TSINTR_AUTT0; 6538 } 6539 6540 if (tsicr & TSINTR_AUTT1) { 6541 nsec = rd32(E1000_AUXSTMPL1); 6542 sec = rd32(E1000_AUXSTMPH1); 6543 event.type = PTP_CLOCK_EXTTS; 6544 event.index = 1; 6545 event.timestamp = sec * 1000000000ULL + nsec; 6546 ptp_clock_event(adapter->ptp_clock, &event); 6547 ack |= TSINTR_AUTT1; 6548 } 6549 6550 /* acknowledge the interrupts */ 6551 wr32(E1000_TSICR, ack); 6552 } 6553 6554 static irqreturn_t igb_msix_other(int irq, void *data) 6555 { 6556 struct igb_adapter *adapter = data; 6557 struct e1000_hw *hw = &adapter->hw; 6558 u32 icr = rd32(E1000_ICR); 6559 /* reading ICR causes bit 31 of EICR to be cleared */ 6560 6561 if (icr & E1000_ICR_DRSTA) 6562 schedule_work(&adapter->reset_task); 6563 6564 if (icr & E1000_ICR_DOUTSYNC) { 6565 /* HW is reporting DMA is out of sync */ 6566 adapter->stats.doosync++; 6567 /* The DMA Out of Sync is also indication of a spoof event 6568 * in IOV mode. Check the Wrong VM Behavior register to 6569 * see if it is really a spoof event. 6570 */ 6571 igb_check_wvbr(adapter); 6572 } 6573 6574 /* Check for a mailbox event */ 6575 if (icr & E1000_ICR_VMMB) 6576 igb_msg_task(adapter); 6577 6578 if (icr & E1000_ICR_LSC) { 6579 hw->mac.get_link_status = 1; 6580 /* guard against interrupt when we're going down */ 6581 if (!test_bit(__IGB_DOWN, &adapter->state)) 6582 mod_timer(&adapter->watchdog_timer, jiffies + 1); 6583 } 6584 6585 if (icr & E1000_ICR_TS) 6586 igb_tsync_interrupt(adapter); 6587 6588 wr32(E1000_EIMS, adapter->eims_other); 6589 6590 return IRQ_HANDLED; 6591 } 6592 6593 static void igb_write_itr(struct igb_q_vector *q_vector) 6594 { 6595 struct igb_adapter *adapter = q_vector->adapter; 6596 u32 itr_val = q_vector->itr_val & 0x7FFC; 6597 6598 if (!q_vector->set_itr) 6599 return; 6600 6601 if (!itr_val) 6602 itr_val = 0x4; 6603 6604 if (adapter->hw.mac.type == e1000_82575) 6605 itr_val |= itr_val << 16; 6606 else 6607 itr_val |= E1000_EITR_CNT_IGNR; 6608 6609 writel(itr_val, q_vector->itr_register); 6610 q_vector->set_itr = 0; 6611 } 6612 6613 static irqreturn_t igb_msix_ring(int irq, void *data) 6614 { 6615 struct igb_q_vector *q_vector = data; 6616 6617 /* Write the ITR value calculated from the previous interrupt. */ 6618 igb_write_itr(q_vector); 6619 6620 napi_schedule(&q_vector->napi); 6621 6622 return IRQ_HANDLED; 6623 } 6624 6625 #ifdef CONFIG_IGB_DCA 6626 static void igb_update_tx_dca(struct igb_adapter *adapter, 6627 struct igb_ring *tx_ring, 6628 int cpu) 6629 { 6630 struct e1000_hw *hw = &adapter->hw; 6631 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); 6632 6633 if (hw->mac.type != e1000_82575) 6634 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT; 6635 6636 /* We can enable relaxed ordering for reads, but not writes when 6637 * DCA is enabled. This is due to a known issue in some chipsets 6638 * which will cause the DCA tag to be cleared. 6639 */ 6640 txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN | 6641 E1000_DCA_TXCTRL_DATA_RRO_EN | 6642 E1000_DCA_TXCTRL_DESC_DCA_EN; 6643 6644 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); 6645 } 6646 6647 static void igb_update_rx_dca(struct igb_adapter *adapter, 6648 struct igb_ring *rx_ring, 6649 int cpu) 6650 { 6651 struct e1000_hw *hw = &adapter->hw; 6652 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu); 6653 6654 if (hw->mac.type != e1000_82575) 6655 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT; 6656 6657 /* We can enable relaxed ordering for reads, but not writes when 6658 * DCA is enabled. This is due to a known issue in some chipsets 6659 * which will cause the DCA tag to be cleared. 6660 */ 6661 rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN | 6662 E1000_DCA_RXCTRL_DESC_DCA_EN; 6663 6664 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); 6665 } 6666 6667 static void igb_update_dca(struct igb_q_vector *q_vector) 6668 { 6669 struct igb_adapter *adapter = q_vector->adapter; 6670 int cpu = get_cpu(); 6671 6672 if (q_vector->cpu == cpu) 6673 goto out_no_update; 6674 6675 if (q_vector->tx.ring) 6676 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu); 6677 6678 if (q_vector->rx.ring) 6679 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu); 6680 6681 q_vector->cpu = cpu; 6682 out_no_update: 6683 put_cpu(); 6684 } 6685 6686 static void igb_setup_dca(struct igb_adapter *adapter) 6687 { 6688 struct e1000_hw *hw = &adapter->hw; 6689 int i; 6690 6691 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED)) 6692 return; 6693 6694 /* Always use CB2 mode, difference is masked in the CB driver. */ 6695 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); 6696 6697 for (i = 0; i < adapter->num_q_vectors; i++) { 6698 adapter->q_vector[i]->cpu = -1; 6699 igb_update_dca(adapter->q_vector[i]); 6700 } 6701 } 6702 6703 static int __igb_notify_dca(struct device *dev, void *data) 6704 { 6705 struct net_device *netdev = dev_get_drvdata(dev); 6706 struct igb_adapter *adapter = netdev_priv(netdev); 6707 struct pci_dev *pdev = adapter->pdev; 6708 struct e1000_hw *hw = &adapter->hw; 6709 unsigned long event = *(unsigned long *)data; 6710 6711 switch (event) { 6712 case DCA_PROVIDER_ADD: 6713 /* if already enabled, don't do it again */ 6714 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 6715 break; 6716 if (dca_add_requester(dev) == 0) { 6717 adapter->flags |= IGB_FLAG_DCA_ENABLED; 6718 dev_info(&pdev->dev, "DCA enabled\n"); 6719 igb_setup_dca(adapter); 6720 break; 6721 } 6722 /* Fall Through since DCA is disabled. */ 6723 case DCA_PROVIDER_REMOVE: 6724 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { 6725 /* without this a class_device is left 6726 * hanging around in the sysfs model 6727 */ 6728 dca_remove_requester(dev); 6729 dev_info(&pdev->dev, "DCA disabled\n"); 6730 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; 6731 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); 6732 } 6733 break; 6734 } 6735 6736 return 0; 6737 } 6738 6739 static int igb_notify_dca(struct notifier_block *nb, unsigned long event, 6740 void *p) 6741 { 6742 int ret_val; 6743 6744 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, 6745 __igb_notify_dca); 6746 6747 return ret_val ? NOTIFY_BAD : NOTIFY_DONE; 6748 } 6749 #endif /* CONFIG_IGB_DCA */ 6750 6751 #ifdef CONFIG_PCI_IOV 6752 static int igb_vf_configure(struct igb_adapter *adapter, int vf) 6753 { 6754 unsigned char mac_addr[ETH_ALEN]; 6755 6756 eth_zero_addr(mac_addr); 6757 igb_set_vf_mac(adapter, vf, mac_addr); 6758 6759 /* By default spoof check is enabled for all VFs */ 6760 adapter->vf_data[vf].spoofchk_enabled = true; 6761 6762 /* By default VFs are not trusted */ 6763 adapter->vf_data[vf].trusted = false; 6764 6765 return 0; 6766 } 6767 6768 #endif 6769 static void igb_ping_all_vfs(struct igb_adapter *adapter) 6770 { 6771 struct e1000_hw *hw = &adapter->hw; 6772 u32 ping; 6773 int i; 6774 6775 for (i = 0 ; i < adapter->vfs_allocated_count; i++) { 6776 ping = E1000_PF_CONTROL_MSG; 6777 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS) 6778 ping |= E1000_VT_MSGTYPE_CTS; 6779 igb_write_mbx(hw, &ping, 1, i); 6780 } 6781 } 6782 6783 static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) 6784 { 6785 struct e1000_hw *hw = &adapter->hw; 6786 u32 vmolr = rd32(E1000_VMOLR(vf)); 6787 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; 6788 6789 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | 6790 IGB_VF_FLAG_MULTI_PROMISC); 6791 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); 6792 6793 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { 6794 vmolr |= E1000_VMOLR_MPME; 6795 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; 6796 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; 6797 } else { 6798 /* if we have hashes and we are clearing a multicast promisc 6799 * flag we need to write the hashes to the MTA as this step 6800 * was previously skipped 6801 */ 6802 if (vf_data->num_vf_mc_hashes > 30) { 6803 vmolr |= E1000_VMOLR_MPME; 6804 } else if (vf_data->num_vf_mc_hashes) { 6805 int j; 6806 6807 vmolr |= E1000_VMOLR_ROMPE; 6808 for (j = 0; j < vf_data->num_vf_mc_hashes; j++) 6809 igb_mta_set(hw, vf_data->vf_mc_hashes[j]); 6810 } 6811 } 6812 6813 wr32(E1000_VMOLR(vf), vmolr); 6814 6815 /* there are flags left unprocessed, likely not supported */ 6816 if (*msgbuf & E1000_VT_MSGINFO_MASK) 6817 return -EINVAL; 6818 6819 return 0; 6820 } 6821 6822 static int igb_set_vf_multicasts(struct igb_adapter *adapter, 6823 u32 *msgbuf, u32 vf) 6824 { 6825 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; 6826 u16 *hash_list = (u16 *)&msgbuf[1]; 6827 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; 6828 int i; 6829 6830 /* salt away the number of multicast addresses assigned 6831 * to this VF for later use to restore when the PF multi cast 6832 * list changes 6833 */ 6834 vf_data->num_vf_mc_hashes = n; 6835 6836 /* only up to 30 hash values supported */ 6837 if (n > 30) 6838 n = 30; 6839 6840 /* store the hashes for later use */ 6841 for (i = 0; i < n; i++) 6842 vf_data->vf_mc_hashes[i] = hash_list[i]; 6843 6844 /* Flush and reset the mta with the new values */ 6845 igb_set_rx_mode(adapter->netdev); 6846 6847 return 0; 6848 } 6849 6850 static void igb_restore_vf_multicasts(struct igb_adapter *adapter) 6851 { 6852 struct e1000_hw *hw = &adapter->hw; 6853 struct vf_data_storage *vf_data; 6854 int i, j; 6855 6856 for (i = 0; i < adapter->vfs_allocated_count; i++) { 6857 u32 vmolr = rd32(E1000_VMOLR(i)); 6858 6859 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); 6860 6861 vf_data = &adapter->vf_data[i]; 6862 6863 if ((vf_data->num_vf_mc_hashes > 30) || 6864 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) { 6865 vmolr |= E1000_VMOLR_MPME; 6866 } else if (vf_data->num_vf_mc_hashes) { 6867 vmolr |= E1000_VMOLR_ROMPE; 6868 for (j = 0; j < vf_data->num_vf_mc_hashes; j++) 6869 igb_mta_set(hw, vf_data->vf_mc_hashes[j]); 6870 } 6871 wr32(E1000_VMOLR(i), vmolr); 6872 } 6873 } 6874 6875 static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) 6876 { 6877 struct e1000_hw *hw = &adapter->hw; 6878 u32 pool_mask, vlvf_mask, i; 6879 6880 /* create mask for VF and other pools */ 6881 pool_mask = E1000_VLVF_POOLSEL_MASK; 6882 vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf); 6883 6884 /* drop PF from pool bits */ 6885 pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT + 6886 adapter->vfs_allocated_count); 6887 6888 /* Find the vlan filter for this id */ 6889 for (i = E1000_VLVF_ARRAY_SIZE; i--;) { 6890 u32 vlvf = rd32(E1000_VLVF(i)); 6891 u32 vfta_mask, vid, vfta; 6892 6893 /* remove the vf from the pool */ 6894 if (!(vlvf & vlvf_mask)) 6895 continue; 6896 6897 /* clear out bit from VLVF */ 6898 vlvf ^= vlvf_mask; 6899 6900 /* if other pools are present, just remove ourselves */ 6901 if (vlvf & pool_mask) 6902 goto update_vlvfb; 6903 6904 /* if PF is present, leave VFTA */ 6905 if (vlvf & E1000_VLVF_POOLSEL_MASK) 6906 goto update_vlvf; 6907 6908 vid = vlvf & E1000_VLVF_VLANID_MASK; 6909 vfta_mask = BIT(vid % 32); 6910 6911 /* clear bit from VFTA */ 6912 vfta = adapter->shadow_vfta[vid / 32]; 6913 if (vfta & vfta_mask) 6914 hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask); 6915 update_vlvf: 6916 /* clear pool selection enable */ 6917 if (adapter->flags & IGB_FLAG_VLAN_PROMISC) 6918 vlvf &= E1000_VLVF_POOLSEL_MASK; 6919 else 6920 vlvf = 0; 6921 update_vlvfb: 6922 /* clear pool bits */ 6923 wr32(E1000_VLVF(i), vlvf); 6924 } 6925 } 6926 6927 static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan) 6928 { 6929 u32 vlvf; 6930 int idx; 6931 6932 /* short cut the special case */ 6933 if (vlan == 0) 6934 return 0; 6935 6936 /* Search for the VLAN id in the VLVF entries */ 6937 for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) { 6938 vlvf = rd32(E1000_VLVF(idx)); 6939 if ((vlvf & VLAN_VID_MASK) == vlan) 6940 break; 6941 } 6942 6943 return idx; 6944 } 6945 6946 static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid) 6947 { 6948 struct e1000_hw *hw = &adapter->hw; 6949 u32 bits, pf_id; 6950 int idx; 6951 6952 idx = igb_find_vlvf_entry(hw, vid); 6953 if (!idx) 6954 return; 6955 6956 /* See if any other pools are set for this VLAN filter 6957 * entry other than the PF. 6958 */ 6959 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; 6960 bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK; 6961 bits &= rd32(E1000_VLVF(idx)); 6962 6963 /* Disable the filter so this falls into the default pool. */ 6964 if (!bits) { 6965 if (adapter->flags & IGB_FLAG_VLAN_PROMISC) 6966 wr32(E1000_VLVF(idx), BIT(pf_id)); 6967 else 6968 wr32(E1000_VLVF(idx), 0); 6969 } 6970 } 6971 6972 static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid, 6973 bool add, u32 vf) 6974 { 6975 int pf_id = adapter->vfs_allocated_count; 6976 struct e1000_hw *hw = &adapter->hw; 6977 int err; 6978 6979 /* If VLAN overlaps with one the PF is currently monitoring make 6980 * sure that we are able to allocate a VLVF entry. This may be 6981 * redundant but it guarantees PF will maintain visibility to 6982 * the VLAN. 6983 */ 6984 if (add && test_bit(vid, adapter->active_vlans)) { 6985 err = igb_vfta_set(hw, vid, pf_id, true, false); 6986 if (err) 6987 return err; 6988 } 6989 6990 err = igb_vfta_set(hw, vid, vf, add, false); 6991 6992 if (add && !err) 6993 return err; 6994 6995 /* If we failed to add the VF VLAN or we are removing the VF VLAN 6996 * we may need to drop the PF pool bit in order to allow us to free 6997 * up the VLVF resources. 6998 */ 6999 if (test_bit(vid, adapter->active_vlans) || 7000 (adapter->flags & IGB_FLAG_VLAN_PROMISC)) 7001 igb_update_pf_vlvf(adapter, vid); 7002 7003 return err; 7004 } 7005 7006 static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) 7007 { 7008 struct e1000_hw *hw = &adapter->hw; 7009 7010 if (vid) 7011 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT)); 7012 else 7013 wr32(E1000_VMVIR(vf), 0); 7014 } 7015 7016 static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf, 7017 u16 vlan, u8 qos) 7018 { 7019 int err; 7020 7021 err = igb_set_vf_vlan(adapter, vlan, true, vf); 7022 if (err) 7023 return err; 7024 7025 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); 7026 igb_set_vmolr(adapter, vf, !vlan); 7027 7028 /* revoke access to previous VLAN */ 7029 if (vlan != adapter->vf_data[vf].pf_vlan) 7030 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, 7031 false, vf); 7032 7033 adapter->vf_data[vf].pf_vlan = vlan; 7034 adapter->vf_data[vf].pf_qos = qos; 7035 igb_set_vf_vlan_strip(adapter, vf, true); 7036 dev_info(&adapter->pdev->dev, 7037 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); 7038 if (test_bit(__IGB_DOWN, &adapter->state)) { 7039 dev_warn(&adapter->pdev->dev, 7040 "The VF VLAN has been set, but the PF device is not up.\n"); 7041 dev_warn(&adapter->pdev->dev, 7042 "Bring the PF device up before attempting to use the VF device.\n"); 7043 } 7044 7045 return err; 7046 } 7047 7048 static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf) 7049 { 7050 /* Restore tagless access via VLAN 0 */ 7051 igb_set_vf_vlan(adapter, 0, true, vf); 7052 7053 igb_set_vmvir(adapter, 0, vf); 7054 igb_set_vmolr(adapter, vf, true); 7055 7056 /* Remove any PF assigned VLAN */ 7057 if (adapter->vf_data[vf].pf_vlan) 7058 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, 7059 false, vf); 7060 7061 adapter->vf_data[vf].pf_vlan = 0; 7062 adapter->vf_data[vf].pf_qos = 0; 7063 igb_set_vf_vlan_strip(adapter, vf, false); 7064 7065 return 0; 7066 } 7067 7068 static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf, 7069 u16 vlan, u8 qos, __be16 vlan_proto) 7070 { 7071 struct igb_adapter *adapter = netdev_priv(netdev); 7072 7073 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) 7074 return -EINVAL; 7075 7076 if (vlan_proto != htons(ETH_P_8021Q)) 7077 return -EPROTONOSUPPORT; 7078 7079 return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) : 7080 igb_disable_port_vlan(adapter, vf); 7081 } 7082 7083 static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) 7084 { 7085 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; 7086 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); 7087 int ret; 7088 7089 if (adapter->vf_data[vf].pf_vlan) 7090 return -1; 7091 7092 /* VLAN 0 is a special case, don't allow it to be removed */ 7093 if (!vid && !add) 7094 return 0; 7095 7096 ret = igb_set_vf_vlan(adapter, vid, !!add, vf); 7097 if (!ret) 7098 igb_set_vf_vlan_strip(adapter, vf, !!vid); 7099 return ret; 7100 } 7101 7102 static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) 7103 { 7104 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; 7105 7106 /* clear flags - except flag that indicates PF has set the MAC */ 7107 vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC; 7108 vf_data->last_nack = jiffies; 7109 7110 /* reset vlans for device */ 7111 igb_clear_vf_vfta(adapter, vf); 7112 igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf); 7113 igb_set_vmvir(adapter, vf_data->pf_vlan | 7114 (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf); 7115 igb_set_vmolr(adapter, vf, !vf_data->pf_vlan); 7116 igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan)); 7117 7118 /* reset multicast table array for vf */ 7119 adapter->vf_data[vf].num_vf_mc_hashes = 0; 7120 7121 /* Flush and reset the mta with the new values */ 7122 igb_set_rx_mode(adapter->netdev); 7123 } 7124 7125 static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) 7126 { 7127 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; 7128 7129 /* clear mac address as we were hotplug removed/added */ 7130 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) 7131 eth_zero_addr(vf_mac); 7132 7133 /* process remaining reset events */ 7134 igb_vf_reset(adapter, vf); 7135 } 7136 7137 static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) 7138 { 7139 struct e1000_hw *hw = &adapter->hw; 7140 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; 7141 u32 reg, msgbuf[3]; 7142 u8 *addr = (u8 *)(&msgbuf[1]); 7143 7144 /* process all the same items cleared in a function level reset */ 7145 igb_vf_reset(adapter, vf); 7146 7147 /* set vf mac address */ 7148 igb_set_vf_mac(adapter, vf, vf_mac); 7149 7150 /* enable transmit and receive for vf */ 7151 reg = rd32(E1000_VFTE); 7152 wr32(E1000_VFTE, reg | BIT(vf)); 7153 reg = rd32(E1000_VFRE); 7154 wr32(E1000_VFRE, reg | BIT(vf)); 7155 7156 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; 7157 7158 /* reply to reset with ack and vf mac address */ 7159 if (!is_zero_ether_addr(vf_mac)) { 7160 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; 7161 memcpy(addr, vf_mac, ETH_ALEN); 7162 } else { 7163 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK; 7164 } 7165 igb_write_mbx(hw, msgbuf, 3, vf); 7166 } 7167 7168 static void igb_flush_mac_table(struct igb_adapter *adapter) 7169 { 7170 struct e1000_hw *hw = &adapter->hw; 7171 int i; 7172 7173 for (i = 0; i < hw->mac.rar_entry_count; i++) { 7174 adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE; 7175 memset(adapter->mac_table[i].addr, 0, ETH_ALEN); 7176 adapter->mac_table[i].queue = 0; 7177 igb_rar_set_index(adapter, i); 7178 } 7179 } 7180 7181 static int igb_available_rars(struct igb_adapter *adapter, u8 queue) 7182 { 7183 struct e1000_hw *hw = &adapter->hw; 7184 /* do not count rar entries reserved for VFs MAC addresses */ 7185 int rar_entries = hw->mac.rar_entry_count - 7186 adapter->vfs_allocated_count; 7187 int i, count = 0; 7188 7189 for (i = 0; i < rar_entries; i++) { 7190 /* do not count default entries */ 7191 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) 7192 continue; 7193 7194 /* do not count "in use" entries for different queues */ 7195 if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) && 7196 (adapter->mac_table[i].queue != queue)) 7197 continue; 7198 7199 count++; 7200 } 7201 7202 return count; 7203 } 7204 7205 /* Set default MAC address for the PF in the first RAR entry */ 7206 static void igb_set_default_mac_filter(struct igb_adapter *adapter) 7207 { 7208 struct igb_mac_addr *mac_table = &adapter->mac_table[0]; 7209 7210 ether_addr_copy(mac_table->addr, adapter->hw.mac.addr); 7211 mac_table->queue = adapter->vfs_allocated_count; 7212 mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE; 7213 7214 igb_rar_set_index(adapter, 0); 7215 } 7216 7217 /* If the filter to be added and an already existing filter express 7218 * the same address and address type, it should be possible to only 7219 * override the other configurations, for example the queue to steer 7220 * traffic. 7221 */ 7222 static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry, 7223 const u8 *addr, const u8 flags) 7224 { 7225 if (!(entry->state & IGB_MAC_STATE_IN_USE)) 7226 return true; 7227 7228 if ((entry->state & IGB_MAC_STATE_SRC_ADDR) != 7229 (flags & IGB_MAC_STATE_SRC_ADDR)) 7230 return false; 7231 7232 if (!ether_addr_equal(addr, entry->addr)) 7233 return false; 7234 7235 return true; 7236 } 7237 7238 /* Add a MAC filter for 'addr' directing matching traffic to 'queue', 7239 * 'flags' is used to indicate what kind of match is made, match is by 7240 * default for the destination address, if matching by source address 7241 * is desired the flag IGB_MAC_STATE_SRC_ADDR can be used. 7242 */ 7243 static int igb_add_mac_filter_flags(struct igb_adapter *adapter, 7244 const u8 *addr, const u8 queue, 7245 const u8 flags) 7246 { 7247 struct e1000_hw *hw = &adapter->hw; 7248 int rar_entries = hw->mac.rar_entry_count - 7249 adapter->vfs_allocated_count; 7250 int i; 7251 7252 if (is_zero_ether_addr(addr)) 7253 return -EINVAL; 7254 7255 /* Search for the first empty entry in the MAC table. 7256 * Do not touch entries at the end of the table reserved for the VF MAC 7257 * addresses. 7258 */ 7259 for (i = 0; i < rar_entries; i++) { 7260 if (!igb_mac_entry_can_be_used(&adapter->mac_table[i], 7261 addr, flags)) 7262 continue; 7263 7264 ether_addr_copy(adapter->mac_table[i].addr, addr); 7265 adapter->mac_table[i].queue = queue; 7266 adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags; 7267 7268 igb_rar_set_index(adapter, i); 7269 return i; 7270 } 7271 7272 return -ENOSPC; 7273 } 7274 7275 static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr, 7276 const u8 queue) 7277 { 7278 return igb_add_mac_filter_flags(adapter, addr, queue, 0); 7279 } 7280 7281 /* Remove a MAC filter for 'addr' directing matching traffic to 7282 * 'queue', 'flags' is used to indicate what kind of match need to be 7283 * removed, match is by default for the destination address, if 7284 * matching by source address is to be removed the flag 7285 * IGB_MAC_STATE_SRC_ADDR can be used. 7286 */ 7287 static int igb_del_mac_filter_flags(struct igb_adapter *adapter, 7288 const u8 *addr, const u8 queue, 7289 const u8 flags) 7290 { 7291 struct e1000_hw *hw = &adapter->hw; 7292 int rar_entries = hw->mac.rar_entry_count - 7293 adapter->vfs_allocated_count; 7294 int i; 7295 7296 if (is_zero_ether_addr(addr)) 7297 return -EINVAL; 7298 7299 /* Search for matching entry in the MAC table based on given address 7300 * and queue. Do not touch entries at the end of the table reserved 7301 * for the VF MAC addresses. 7302 */ 7303 for (i = 0; i < rar_entries; i++) { 7304 if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)) 7305 continue; 7306 if ((adapter->mac_table[i].state & flags) != flags) 7307 continue; 7308 if (adapter->mac_table[i].queue != queue) 7309 continue; 7310 if (!ether_addr_equal(adapter->mac_table[i].addr, addr)) 7311 continue; 7312 7313 /* When a filter for the default address is "deleted", 7314 * we return it to its initial configuration 7315 */ 7316 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) { 7317 adapter->mac_table[i].state = 7318 IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE; 7319 adapter->mac_table[i].queue = 7320 adapter->vfs_allocated_count; 7321 } else { 7322 adapter->mac_table[i].state = 0; 7323 adapter->mac_table[i].queue = 0; 7324 memset(adapter->mac_table[i].addr, 0, ETH_ALEN); 7325 } 7326 7327 igb_rar_set_index(adapter, i); 7328 return 0; 7329 } 7330 7331 return -ENOENT; 7332 } 7333 7334 static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, 7335 const u8 queue) 7336 { 7337 return igb_del_mac_filter_flags(adapter, addr, queue, 0); 7338 } 7339 7340 int igb_add_mac_steering_filter(struct igb_adapter *adapter, 7341 const u8 *addr, u8 queue, u8 flags) 7342 { 7343 struct e1000_hw *hw = &adapter->hw; 7344 7345 /* In theory, this should be supported on 82575 as well, but 7346 * that part wasn't easily accessible during development. 7347 */ 7348 if (hw->mac.type != e1000_i210) 7349 return -EOPNOTSUPP; 7350 7351 return igb_add_mac_filter_flags(adapter, addr, queue, 7352 IGB_MAC_STATE_QUEUE_STEERING | flags); 7353 } 7354 7355 int igb_del_mac_steering_filter(struct igb_adapter *adapter, 7356 const u8 *addr, u8 queue, u8 flags) 7357 { 7358 return igb_del_mac_filter_flags(adapter, addr, queue, 7359 IGB_MAC_STATE_QUEUE_STEERING | flags); 7360 } 7361 7362 static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr) 7363 { 7364 struct igb_adapter *adapter = netdev_priv(netdev); 7365 int ret; 7366 7367 ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count); 7368 7369 return min_t(int, ret, 0); 7370 } 7371 7372 static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr) 7373 { 7374 struct igb_adapter *adapter = netdev_priv(netdev); 7375 7376 igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count); 7377 7378 return 0; 7379 } 7380 7381 static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, 7382 const u32 info, const u8 *addr) 7383 { 7384 struct pci_dev *pdev = adapter->pdev; 7385 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; 7386 struct list_head *pos; 7387 struct vf_mac_filter *entry = NULL; 7388 int ret = 0; 7389 7390 switch (info) { 7391 case E1000_VF_MAC_FILTER_CLR: 7392 /* remove all unicast MAC filters related to the current VF */ 7393 list_for_each(pos, &adapter->vf_macs.l) { 7394 entry = list_entry(pos, struct vf_mac_filter, l); 7395 if (entry->vf == vf) { 7396 entry->vf = -1; 7397 entry->free = true; 7398 igb_del_mac_filter(adapter, entry->vf_mac, vf); 7399 } 7400 } 7401 break; 7402 case E1000_VF_MAC_FILTER_ADD: 7403 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && 7404 !vf_data->trusted) { 7405 dev_warn(&pdev->dev, 7406 "VF %d requested MAC filter but is administratively denied\n", 7407 vf); 7408 return -EINVAL; 7409 } 7410 if (!is_valid_ether_addr(addr)) { 7411 dev_warn(&pdev->dev, 7412 "VF %d attempted to set invalid MAC filter\n", 7413 vf); 7414 return -EINVAL; 7415 } 7416 7417 /* try to find empty slot in the list */ 7418 list_for_each(pos, &adapter->vf_macs.l) { 7419 entry = list_entry(pos, struct vf_mac_filter, l); 7420 if (entry->free) 7421 break; 7422 } 7423 7424 if (entry && entry->free) { 7425 entry->free = false; 7426 entry->vf = vf; 7427 ether_addr_copy(entry->vf_mac, addr); 7428 7429 ret = igb_add_mac_filter(adapter, addr, vf); 7430 ret = min_t(int, ret, 0); 7431 } else { 7432 ret = -ENOSPC; 7433 } 7434 7435 if (ret == -ENOSPC) 7436 dev_warn(&pdev->dev, 7437 "VF %d has requested MAC filter but there is no space for it\n", 7438 vf); 7439 break; 7440 default: 7441 ret = -EINVAL; 7442 break; 7443 } 7444 7445 return ret; 7446 } 7447 7448 static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) 7449 { 7450 struct pci_dev *pdev = adapter->pdev; 7451 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; 7452 u32 info = msg[0] & E1000_VT_MSGINFO_MASK; 7453 7454 /* The VF MAC Address is stored in a packed array of bytes 7455 * starting at the second 32 bit word of the msg array 7456 */ 7457 unsigned char *addr = (unsigned char *)&msg[1]; 7458 int ret = 0; 7459 7460 if (!info) { 7461 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && 7462 !vf_data->trusted) { 7463 dev_warn(&pdev->dev, 7464 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", 7465 vf); 7466 return -EINVAL; 7467 } 7468 7469 if (!is_valid_ether_addr(addr)) { 7470 dev_warn(&pdev->dev, 7471 "VF %d attempted to set invalid MAC\n", 7472 vf); 7473 return -EINVAL; 7474 } 7475 7476 ret = igb_set_vf_mac(adapter, vf, addr); 7477 } else { 7478 ret = igb_set_vf_mac_filter(adapter, vf, info, addr); 7479 } 7480 7481 return ret; 7482 } 7483 7484 static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) 7485 { 7486 struct e1000_hw *hw = &adapter->hw; 7487 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; 7488 u32 msg = E1000_VT_MSGTYPE_NACK; 7489 7490 /* if device isn't clear to send it shouldn't be reading either */ 7491 if (!(vf_data->flags & IGB_VF_FLAG_CTS) && 7492 time_after(jiffies, vf_data->last_nack + (2 * HZ))) { 7493 igb_write_mbx(hw, &msg, 1, vf); 7494 vf_data->last_nack = jiffies; 7495 } 7496 } 7497 7498 static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) 7499 { 7500 struct pci_dev *pdev = adapter->pdev; 7501 u32 msgbuf[E1000_VFMAILBOX_SIZE]; 7502 struct e1000_hw *hw = &adapter->hw; 7503 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; 7504 s32 retval; 7505 7506 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false); 7507 7508 if (retval) { 7509 /* if receive failed revoke VF CTS stats and restart init */ 7510 dev_err(&pdev->dev, "Error receiving message from VF\n"); 7511 vf_data->flags &= ~IGB_VF_FLAG_CTS; 7512 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) 7513 goto unlock; 7514 goto out; 7515 } 7516 7517 /* this is a message we already processed, do nothing */ 7518 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) 7519 goto unlock; 7520 7521 /* until the vf completes a reset it should not be 7522 * allowed to start any configuration. 7523 */ 7524 if (msgbuf[0] == E1000_VF_RESET) { 7525 /* unlocks mailbox */ 7526 igb_vf_reset_msg(adapter, vf); 7527 return; 7528 } 7529 7530 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { 7531 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) 7532 goto unlock; 7533 retval = -1; 7534 goto out; 7535 } 7536 7537 switch ((msgbuf[0] & 0xFFFF)) { 7538 case E1000_VF_SET_MAC_ADDR: 7539 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); 7540 break; 7541 case E1000_VF_SET_PROMISC: 7542 retval = igb_set_vf_promisc(adapter, msgbuf, vf); 7543 break; 7544 case E1000_VF_SET_MULTICAST: 7545 retval = igb_set_vf_multicasts(adapter, msgbuf, vf); 7546 break; 7547 case E1000_VF_SET_LPE: 7548 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf); 7549 break; 7550 case E1000_VF_SET_VLAN: 7551 retval = -1; 7552 if (vf_data->pf_vlan) 7553 dev_warn(&pdev->dev, 7554 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", 7555 vf); 7556 else 7557 retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf); 7558 break; 7559 default: 7560 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); 7561 retval = -1; 7562 break; 7563 } 7564 7565 msgbuf[0] |= E1000_VT_MSGTYPE_CTS; 7566 out: 7567 /* notify the VF of the results of what it sent us */ 7568 if (retval) 7569 msgbuf[0] |= E1000_VT_MSGTYPE_NACK; 7570 else 7571 msgbuf[0] |= E1000_VT_MSGTYPE_ACK; 7572 7573 /* unlocks mailbox */ 7574 igb_write_mbx(hw, msgbuf, 1, vf); 7575 return; 7576 7577 unlock: 7578 igb_unlock_mbx(hw, vf); 7579 } 7580 7581 static void igb_msg_task(struct igb_adapter *adapter) 7582 { 7583 struct e1000_hw *hw = &adapter->hw; 7584 u32 vf; 7585 7586 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { 7587 /* process any reset requests */ 7588 if (!igb_check_for_rst(hw, vf)) 7589 igb_vf_reset_event(adapter, vf); 7590 7591 /* process any messages pending */ 7592 if (!igb_check_for_msg(hw, vf)) 7593 igb_rcv_msg_from_vf(adapter, vf); 7594 7595 /* process any acks */ 7596 if (!igb_check_for_ack(hw, vf)) 7597 igb_rcv_ack_from_vf(adapter, vf); 7598 } 7599 } 7600 7601 /** 7602 * igb_set_uta - Set unicast filter table address 7603 * @adapter: board private structure 7604 * @set: boolean indicating if we are setting or clearing bits 7605 * 7606 * The unicast table address is a register array of 32-bit registers. 7607 * The table is meant to be used in a way similar to how the MTA is used 7608 * however due to certain limitations in the hardware it is necessary to 7609 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous 7610 * enable bit to allow vlan tag stripping when promiscuous mode is enabled 7611 **/ 7612 static void igb_set_uta(struct igb_adapter *adapter, bool set) 7613 { 7614 struct e1000_hw *hw = &adapter->hw; 7615 u32 uta = set ? ~0 : 0; 7616 int i; 7617 7618 /* we only need to do this if VMDq is enabled */ 7619 if (!adapter->vfs_allocated_count) 7620 return; 7621 7622 for (i = hw->mac.uta_reg_count; i--;) 7623 array_wr32(E1000_UTA, i, uta); 7624 } 7625 7626 /** 7627 * igb_intr_msi - Interrupt Handler 7628 * @irq: interrupt number 7629 * @data: pointer to a network interface device structure 7630 **/ 7631 static irqreturn_t igb_intr_msi(int irq, void *data) 7632 { 7633 struct igb_adapter *adapter = data; 7634 struct igb_q_vector *q_vector = adapter->q_vector[0]; 7635 struct e1000_hw *hw = &adapter->hw; 7636 /* read ICR disables interrupts using IAM */ 7637 u32 icr = rd32(E1000_ICR); 7638 7639 igb_write_itr(q_vector); 7640 7641 if (icr & E1000_ICR_DRSTA) 7642 schedule_work(&adapter->reset_task); 7643 7644 if (icr & E1000_ICR_DOUTSYNC) { 7645 /* HW is reporting DMA is out of sync */ 7646 adapter->stats.doosync++; 7647 } 7648 7649 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 7650 hw->mac.get_link_status = 1; 7651 if (!test_bit(__IGB_DOWN, &adapter->state)) 7652 mod_timer(&adapter->watchdog_timer, jiffies + 1); 7653 } 7654 7655 if (icr & E1000_ICR_TS) 7656 igb_tsync_interrupt(adapter); 7657 7658 napi_schedule(&q_vector->napi); 7659 7660 return IRQ_HANDLED; 7661 } 7662 7663 /** 7664 * igb_intr - Legacy Interrupt Handler 7665 * @irq: interrupt number 7666 * @data: pointer to a network interface device structure 7667 **/ 7668 static irqreturn_t igb_intr(int irq, void *data) 7669 { 7670 struct igb_adapter *adapter = data; 7671 struct igb_q_vector *q_vector = adapter->q_vector[0]; 7672 struct e1000_hw *hw = &adapter->hw; 7673 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No 7674 * need for the IMC write 7675 */ 7676 u32 icr = rd32(E1000_ICR); 7677 7678 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 7679 * not set, then the adapter didn't send an interrupt 7680 */ 7681 if (!(icr & E1000_ICR_INT_ASSERTED)) 7682 return IRQ_NONE; 7683 7684 igb_write_itr(q_vector); 7685 7686 if (icr & E1000_ICR_DRSTA) 7687 schedule_work(&adapter->reset_task); 7688 7689 if (icr & E1000_ICR_DOUTSYNC) { 7690 /* HW is reporting DMA is out of sync */ 7691 adapter->stats.doosync++; 7692 } 7693 7694 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 7695 hw->mac.get_link_status = 1; 7696 /* guard against interrupt when we're going down */ 7697 if (!test_bit(__IGB_DOWN, &adapter->state)) 7698 mod_timer(&adapter->watchdog_timer, jiffies + 1); 7699 } 7700 7701 if (icr & E1000_ICR_TS) 7702 igb_tsync_interrupt(adapter); 7703 7704 napi_schedule(&q_vector->napi); 7705 7706 return IRQ_HANDLED; 7707 } 7708 7709 static void igb_ring_irq_enable(struct igb_q_vector *q_vector) 7710 { 7711 struct igb_adapter *adapter = q_vector->adapter; 7712 struct e1000_hw *hw = &adapter->hw; 7713 7714 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || 7715 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { 7716 if ((adapter->num_q_vectors == 1) && !adapter->vf_data) 7717 igb_set_itr(q_vector); 7718 else 7719 igb_update_ring_itr(q_vector); 7720 } 7721 7722 if (!test_bit(__IGB_DOWN, &adapter->state)) { 7723 if (adapter->flags & IGB_FLAG_HAS_MSIX) 7724 wr32(E1000_EIMS, q_vector->eims_value); 7725 else 7726 igb_irq_enable(adapter); 7727 } 7728 } 7729 7730 /** 7731 * igb_poll - NAPI Rx polling callback 7732 * @napi: napi polling structure 7733 * @budget: count of how many packets we should handle 7734 **/ 7735 static int igb_poll(struct napi_struct *napi, int budget) 7736 { 7737 struct igb_q_vector *q_vector = container_of(napi, 7738 struct igb_q_vector, 7739 napi); 7740 bool clean_complete = true; 7741 int work_done = 0; 7742 7743 #ifdef CONFIG_IGB_DCA 7744 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) 7745 igb_update_dca(q_vector); 7746 #endif 7747 if (q_vector->tx.ring) 7748 clean_complete = igb_clean_tx_irq(q_vector, budget); 7749 7750 if (q_vector->rx.ring) { 7751 int cleaned = igb_clean_rx_irq(q_vector, budget); 7752 7753 work_done += cleaned; 7754 if (cleaned >= budget) 7755 clean_complete = false; 7756 } 7757 7758 /* If all work not completed, return budget and keep polling */ 7759 if (!clean_complete) 7760 return budget; 7761 7762 /* If not enough Rx work done, exit the polling mode */ 7763 napi_complete_done(napi, work_done); 7764 igb_ring_irq_enable(q_vector); 7765 7766 return 0; 7767 } 7768 7769 /** 7770 * igb_clean_tx_irq - Reclaim resources after transmit completes 7771 * @q_vector: pointer to q_vector containing needed info 7772 * @napi_budget: Used to determine if we are in netpoll 7773 * 7774 * returns true if ring is completely cleaned 7775 **/ 7776 static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) 7777 { 7778 struct igb_adapter *adapter = q_vector->adapter; 7779 struct igb_ring *tx_ring = q_vector->tx.ring; 7780 struct igb_tx_buffer *tx_buffer; 7781 union e1000_adv_tx_desc *tx_desc; 7782 unsigned int total_bytes = 0, total_packets = 0; 7783 unsigned int budget = q_vector->tx.work_limit; 7784 unsigned int i = tx_ring->next_to_clean; 7785 7786 if (test_bit(__IGB_DOWN, &adapter->state)) 7787 return true; 7788 7789 tx_buffer = &tx_ring->tx_buffer_info[i]; 7790 tx_desc = IGB_TX_DESC(tx_ring, i); 7791 i -= tx_ring->count; 7792 7793 do { 7794 union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; 7795 7796 /* if next_to_watch is not set then there is no work pending */ 7797 if (!eop_desc) 7798 break; 7799 7800 /* prevent any other reads prior to eop_desc */ 7801 smp_rmb(); 7802 7803 /* if DD is not set pending work has not been completed */ 7804 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) 7805 break; 7806 7807 /* clear next_to_watch to prevent false hangs */ 7808 tx_buffer->next_to_watch = NULL; 7809 7810 /* update the statistics for this packet */ 7811 total_bytes += tx_buffer->bytecount; 7812 total_packets += tx_buffer->gso_segs; 7813 7814 /* free the skb */ 7815 napi_consume_skb(tx_buffer->skb, napi_budget); 7816 7817 /* unmap skb header data */ 7818 dma_unmap_single(tx_ring->dev, 7819 dma_unmap_addr(tx_buffer, dma), 7820 dma_unmap_len(tx_buffer, len), 7821 DMA_TO_DEVICE); 7822 7823 /* clear tx_buffer data */ 7824 dma_unmap_len_set(tx_buffer, len, 0); 7825 7826 /* clear last DMA location and unmap remaining buffers */ 7827 while (tx_desc != eop_desc) { 7828 tx_buffer++; 7829 tx_desc++; 7830 i++; 7831 if (unlikely(!i)) { 7832 i -= tx_ring->count; 7833 tx_buffer = tx_ring->tx_buffer_info; 7834 tx_desc = IGB_TX_DESC(tx_ring, 0); 7835 } 7836 7837 /* unmap any remaining paged data */ 7838 if (dma_unmap_len(tx_buffer, len)) { 7839 dma_unmap_page(tx_ring->dev, 7840 dma_unmap_addr(tx_buffer, dma), 7841 dma_unmap_len(tx_buffer, len), 7842 DMA_TO_DEVICE); 7843 dma_unmap_len_set(tx_buffer, len, 0); 7844 } 7845 } 7846 7847 /* move us one more past the eop_desc for start of next pkt */ 7848 tx_buffer++; 7849 tx_desc++; 7850 i++; 7851 if (unlikely(!i)) { 7852 i -= tx_ring->count; 7853 tx_buffer = tx_ring->tx_buffer_info; 7854 tx_desc = IGB_TX_DESC(tx_ring, 0); 7855 } 7856 7857 /* issue prefetch for next Tx descriptor */ 7858 prefetch(tx_desc); 7859 7860 /* update budget accounting */ 7861 budget--; 7862 } while (likely(budget)); 7863 7864 netdev_tx_completed_queue(txring_txq(tx_ring), 7865 total_packets, total_bytes); 7866 i += tx_ring->count; 7867 tx_ring->next_to_clean = i; 7868 u64_stats_update_begin(&tx_ring->tx_syncp); 7869 tx_ring->tx_stats.bytes += total_bytes; 7870 tx_ring->tx_stats.packets += total_packets; 7871 u64_stats_update_end(&tx_ring->tx_syncp); 7872 q_vector->tx.total_bytes += total_bytes; 7873 q_vector->tx.total_packets += total_packets; 7874 7875 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { 7876 struct e1000_hw *hw = &adapter->hw; 7877 7878 /* Detect a transmit hang in hardware, this serializes the 7879 * check with the clearing of time_stamp and movement of i 7880 */ 7881 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 7882 if (tx_buffer->next_to_watch && 7883 time_after(jiffies, tx_buffer->time_stamp + 7884 (adapter->tx_timeout_factor * HZ)) && 7885 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { 7886 7887 /* detected Tx unit hang */ 7888 dev_err(tx_ring->dev, 7889 "Detected Tx Unit Hang\n" 7890 " Tx Queue <%d>\n" 7891 " TDH <%x>\n" 7892 " TDT <%x>\n" 7893 " next_to_use <%x>\n" 7894 " next_to_clean <%x>\n" 7895 "buffer_info[next_to_clean]\n" 7896 " time_stamp <%lx>\n" 7897 " next_to_watch <%p>\n" 7898 " jiffies <%lx>\n" 7899 " desc.status <%x>\n", 7900 tx_ring->queue_index, 7901 rd32(E1000_TDH(tx_ring->reg_idx)), 7902 readl(tx_ring->tail), 7903 tx_ring->next_to_use, 7904 tx_ring->next_to_clean, 7905 tx_buffer->time_stamp, 7906 tx_buffer->next_to_watch, 7907 jiffies, 7908 tx_buffer->next_to_watch->wb.status); 7909 netif_stop_subqueue(tx_ring->netdev, 7910 tx_ring->queue_index); 7911 7912 /* we are about to reset, no point in enabling stuff */ 7913 return true; 7914 } 7915 } 7916 7917 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 7918 if (unlikely(total_packets && 7919 netif_carrier_ok(tx_ring->netdev) && 7920 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { 7921 /* Make sure that anybody stopping the queue after this 7922 * sees the new next_to_clean. 7923 */ 7924 smp_mb(); 7925 if (__netif_subqueue_stopped(tx_ring->netdev, 7926 tx_ring->queue_index) && 7927 !(test_bit(__IGB_DOWN, &adapter->state))) { 7928 netif_wake_subqueue(tx_ring->netdev, 7929 tx_ring->queue_index); 7930 7931 u64_stats_update_begin(&tx_ring->tx_syncp); 7932 tx_ring->tx_stats.restart_queue++; 7933 u64_stats_update_end(&tx_ring->tx_syncp); 7934 } 7935 } 7936 7937 return !!budget; 7938 } 7939 7940 /** 7941 * igb_reuse_rx_page - page flip buffer and store it back on the ring 7942 * @rx_ring: rx descriptor ring to store buffers on 7943 * @old_buff: donor buffer to have page reused 7944 * 7945 * Synchronizes page for reuse by the adapter 7946 **/ 7947 static void igb_reuse_rx_page(struct igb_ring *rx_ring, 7948 struct igb_rx_buffer *old_buff) 7949 { 7950 struct igb_rx_buffer *new_buff; 7951 u16 nta = rx_ring->next_to_alloc; 7952 7953 new_buff = &rx_ring->rx_buffer_info[nta]; 7954 7955 /* update, and store next to alloc */ 7956 nta++; 7957 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 7958 7959 /* Transfer page from old buffer to new buffer. 7960 * Move each member individually to avoid possible store 7961 * forwarding stalls. 7962 */ 7963 new_buff->dma = old_buff->dma; 7964 new_buff->page = old_buff->page; 7965 new_buff->page_offset = old_buff->page_offset; 7966 new_buff->pagecnt_bias = old_buff->pagecnt_bias; 7967 } 7968 7969 static inline bool igb_page_is_reserved(struct page *page) 7970 { 7971 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); 7972 } 7973 7974 static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer) 7975 { 7976 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 7977 struct page *page = rx_buffer->page; 7978 7979 /* avoid re-using remote pages */ 7980 if (unlikely(igb_page_is_reserved(page))) 7981 return false; 7982 7983 #if (PAGE_SIZE < 8192) 7984 /* if we are only owner of page we can reuse it */ 7985 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) 7986 return false; 7987 #else 7988 #define IGB_LAST_OFFSET \ 7989 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048) 7990 7991 if (rx_buffer->page_offset > IGB_LAST_OFFSET) 7992 return false; 7993 #endif 7994 7995 /* If we have drained the page fragment pool we need to update 7996 * the pagecnt_bias and page count so that we fully restock the 7997 * number of references the driver holds. 7998 */ 7999 if (unlikely(!pagecnt_bias)) { 8000 page_ref_add(page, USHRT_MAX); 8001 rx_buffer->pagecnt_bias = USHRT_MAX; 8002 } 8003 8004 return true; 8005 } 8006 8007 /** 8008 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff 8009 * @rx_ring: rx descriptor ring to transact packets on 8010 * @rx_buffer: buffer containing page to add 8011 * @skb: sk_buff to place the data into 8012 * @size: size of buffer to be added 8013 * 8014 * This function will add the data contained in rx_buffer->page to the skb. 8015 **/ 8016 static void igb_add_rx_frag(struct igb_ring *rx_ring, 8017 struct igb_rx_buffer *rx_buffer, 8018 struct sk_buff *skb, 8019 unsigned int size) 8020 { 8021 #if (PAGE_SIZE < 8192) 8022 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; 8023 #else 8024 unsigned int truesize = ring_uses_build_skb(rx_ring) ? 8025 SKB_DATA_ALIGN(IGB_SKB_PAD + size) : 8026 SKB_DATA_ALIGN(size); 8027 #endif 8028 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, 8029 rx_buffer->page_offset, size, truesize); 8030 #if (PAGE_SIZE < 8192) 8031 rx_buffer->page_offset ^= truesize; 8032 #else 8033 rx_buffer->page_offset += truesize; 8034 #endif 8035 } 8036 8037 static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, 8038 struct igb_rx_buffer *rx_buffer, 8039 union e1000_adv_rx_desc *rx_desc, 8040 unsigned int size) 8041 { 8042 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; 8043 #if (PAGE_SIZE < 8192) 8044 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; 8045 #else 8046 unsigned int truesize = SKB_DATA_ALIGN(size); 8047 #endif 8048 unsigned int headlen; 8049 struct sk_buff *skb; 8050 8051 /* prefetch first cache line of first page */ 8052 prefetch(va); 8053 #if L1_CACHE_BYTES < 128 8054 prefetch(va + L1_CACHE_BYTES); 8055 #endif 8056 8057 /* allocate a skb to store the frags */ 8058 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); 8059 if (unlikely(!skb)) 8060 return NULL; 8061 8062 if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { 8063 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); 8064 va += IGB_TS_HDR_LEN; 8065 size -= IGB_TS_HDR_LEN; 8066 } 8067 8068 /* Determine available headroom for copy */ 8069 headlen = size; 8070 if (headlen > IGB_RX_HDR_LEN) 8071 headlen = eth_get_headlen(va, IGB_RX_HDR_LEN); 8072 8073 /* align pull length to size of long to optimize memcpy performance */ 8074 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); 8075 8076 /* update all of the pointers */ 8077 size -= headlen; 8078 if (size) { 8079 skb_add_rx_frag(skb, 0, rx_buffer->page, 8080 (va + headlen) - page_address(rx_buffer->page), 8081 size, truesize); 8082 #if (PAGE_SIZE < 8192) 8083 rx_buffer->page_offset ^= truesize; 8084 #else 8085 rx_buffer->page_offset += truesize; 8086 #endif 8087 } else { 8088 rx_buffer->pagecnt_bias++; 8089 } 8090 8091 return skb; 8092 } 8093 8094 static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, 8095 struct igb_rx_buffer *rx_buffer, 8096 union e1000_adv_rx_desc *rx_desc, 8097 unsigned int size) 8098 { 8099 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; 8100 #if (PAGE_SIZE < 8192) 8101 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; 8102 #else 8103 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 8104 SKB_DATA_ALIGN(IGB_SKB_PAD + size); 8105 #endif 8106 struct sk_buff *skb; 8107 8108 /* prefetch first cache line of first page */ 8109 prefetch(va); 8110 #if L1_CACHE_BYTES < 128 8111 prefetch(va + L1_CACHE_BYTES); 8112 #endif 8113 8114 /* build an skb around the page buffer */ 8115 skb = build_skb(va - IGB_SKB_PAD, truesize); 8116 if (unlikely(!skb)) 8117 return NULL; 8118 8119 /* update pointers within the skb to store the data */ 8120 skb_reserve(skb, IGB_SKB_PAD); 8121 __skb_put(skb, size); 8122 8123 /* pull timestamp out of packet data */ 8124 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { 8125 igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb); 8126 __skb_pull(skb, IGB_TS_HDR_LEN); 8127 } 8128 8129 /* update buffer offset */ 8130 #if (PAGE_SIZE < 8192) 8131 rx_buffer->page_offset ^= truesize; 8132 #else 8133 rx_buffer->page_offset += truesize; 8134 #endif 8135 8136 return skb; 8137 } 8138 8139 static inline void igb_rx_checksum(struct igb_ring *ring, 8140 union e1000_adv_rx_desc *rx_desc, 8141 struct sk_buff *skb) 8142 { 8143 skb_checksum_none_assert(skb); 8144 8145 /* Ignore Checksum bit is set */ 8146 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM)) 8147 return; 8148 8149 /* Rx checksum disabled via ethtool */ 8150 if (!(ring->netdev->features & NETIF_F_RXCSUM)) 8151 return; 8152 8153 /* TCP/UDP checksum error bit is set */ 8154 if (igb_test_staterr(rx_desc, 8155 E1000_RXDEXT_STATERR_TCPE | 8156 E1000_RXDEXT_STATERR_IPE)) { 8157 /* work around errata with sctp packets where the TCPE aka 8158 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) 8159 * packets, (aka let the stack check the crc32c) 8160 */ 8161 if (!((skb->len == 60) && 8162 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { 8163 u64_stats_update_begin(&ring->rx_syncp); 8164 ring->rx_stats.csum_err++; 8165 u64_stats_update_end(&ring->rx_syncp); 8166 } 8167 /* let the stack verify checksum errors */ 8168 return; 8169 } 8170 /* It must be a TCP or UDP packet with a valid checksum */ 8171 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS | 8172 E1000_RXD_STAT_UDPCS)) 8173 skb->ip_summed = CHECKSUM_UNNECESSARY; 8174 8175 dev_dbg(ring->dev, "cksum success: bits %08X\n", 8176 le32_to_cpu(rx_desc->wb.upper.status_error)); 8177 } 8178 8179 static inline void igb_rx_hash(struct igb_ring *ring, 8180 union e1000_adv_rx_desc *rx_desc, 8181 struct sk_buff *skb) 8182 { 8183 if (ring->netdev->features & NETIF_F_RXHASH) 8184 skb_set_hash(skb, 8185 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), 8186 PKT_HASH_TYPE_L3); 8187 } 8188 8189 /** 8190 * igb_is_non_eop - process handling of non-EOP buffers 8191 * @rx_ring: Rx ring being processed 8192 * @rx_desc: Rx descriptor for current buffer 8193 * @skb: current socket buffer containing buffer in progress 8194 * 8195 * This function updates next to clean. If the buffer is an EOP buffer 8196 * this function exits returning false, otherwise it will place the 8197 * sk_buff in the next buffer to be chained and return true indicating 8198 * that this is in fact a non-EOP buffer. 8199 **/ 8200 static bool igb_is_non_eop(struct igb_ring *rx_ring, 8201 union e1000_adv_rx_desc *rx_desc) 8202 { 8203 u32 ntc = rx_ring->next_to_clean + 1; 8204 8205 /* fetch, update, and store next to clean */ 8206 ntc = (ntc < rx_ring->count) ? ntc : 0; 8207 rx_ring->next_to_clean = ntc; 8208 8209 prefetch(IGB_RX_DESC(rx_ring, ntc)); 8210 8211 if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))) 8212 return false; 8213 8214 return true; 8215 } 8216 8217 /** 8218 * igb_cleanup_headers - Correct corrupted or empty headers 8219 * @rx_ring: rx descriptor ring packet is being transacted on 8220 * @rx_desc: pointer to the EOP Rx descriptor 8221 * @skb: pointer to current skb being fixed 8222 * 8223 * Address the case where we are pulling data in on pages only 8224 * and as such no data is present in the skb header. 8225 * 8226 * In addition if skb is not at least 60 bytes we need to pad it so that 8227 * it is large enough to qualify as a valid Ethernet frame. 8228 * 8229 * Returns true if an error was encountered and skb was freed. 8230 **/ 8231 static bool igb_cleanup_headers(struct igb_ring *rx_ring, 8232 union e1000_adv_rx_desc *rx_desc, 8233 struct sk_buff *skb) 8234 { 8235 if (unlikely((igb_test_staterr(rx_desc, 8236 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { 8237 struct net_device *netdev = rx_ring->netdev; 8238 if (!(netdev->features & NETIF_F_RXALL)) { 8239 dev_kfree_skb_any(skb); 8240 return true; 8241 } 8242 } 8243 8244 /* if eth_skb_pad returns an error the skb was freed */ 8245 if (eth_skb_pad(skb)) 8246 return true; 8247 8248 return false; 8249 } 8250 8251 /** 8252 * igb_process_skb_fields - Populate skb header fields from Rx descriptor 8253 * @rx_ring: rx descriptor ring packet is being transacted on 8254 * @rx_desc: pointer to the EOP Rx descriptor 8255 * @skb: pointer to current skb being populated 8256 * 8257 * This function checks the ring, descriptor, and packet information in 8258 * order to populate the hash, checksum, VLAN, timestamp, protocol, and 8259 * other fields within the skb. 8260 **/ 8261 static void igb_process_skb_fields(struct igb_ring *rx_ring, 8262 union e1000_adv_rx_desc *rx_desc, 8263 struct sk_buff *skb) 8264 { 8265 struct net_device *dev = rx_ring->netdev; 8266 8267 igb_rx_hash(rx_ring, rx_desc, skb); 8268 8269 igb_rx_checksum(rx_ring, rx_desc, skb); 8270 8271 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && 8272 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) 8273 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); 8274 8275 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && 8276 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { 8277 u16 vid; 8278 8279 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && 8280 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) 8281 vid = be16_to_cpu(rx_desc->wb.upper.vlan); 8282 else 8283 vid = le16_to_cpu(rx_desc->wb.upper.vlan); 8284 8285 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 8286 } 8287 8288 skb_record_rx_queue(skb, rx_ring->queue_index); 8289 8290 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 8291 } 8292 8293 static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring, 8294 const unsigned int size) 8295 { 8296 struct igb_rx_buffer *rx_buffer; 8297 8298 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; 8299 prefetchw(rx_buffer->page); 8300 8301 /* we are reusing so sync this buffer for CPU use */ 8302 dma_sync_single_range_for_cpu(rx_ring->dev, 8303 rx_buffer->dma, 8304 rx_buffer->page_offset, 8305 size, 8306 DMA_FROM_DEVICE); 8307 8308 rx_buffer->pagecnt_bias--; 8309 8310 return rx_buffer; 8311 } 8312 8313 static void igb_put_rx_buffer(struct igb_ring *rx_ring, 8314 struct igb_rx_buffer *rx_buffer) 8315 { 8316 if (igb_can_reuse_rx_page(rx_buffer)) { 8317 /* hand second half of page back to the ring */ 8318 igb_reuse_rx_page(rx_ring, rx_buffer); 8319 } else { 8320 /* We are not reusing the buffer so unmap it and free 8321 * any references we are holding to it 8322 */ 8323 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, 8324 igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE, 8325 IGB_RX_DMA_ATTR); 8326 __page_frag_cache_drain(rx_buffer->page, 8327 rx_buffer->pagecnt_bias); 8328 } 8329 8330 /* clear contents of rx_buffer */ 8331 rx_buffer->page = NULL; 8332 } 8333 8334 static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) 8335 { 8336 struct igb_ring *rx_ring = q_vector->rx.ring; 8337 struct sk_buff *skb = rx_ring->skb; 8338 unsigned int total_bytes = 0, total_packets = 0; 8339 u16 cleaned_count = igb_desc_unused(rx_ring); 8340 8341 while (likely(total_packets < budget)) { 8342 union e1000_adv_rx_desc *rx_desc; 8343 struct igb_rx_buffer *rx_buffer; 8344 unsigned int size; 8345 8346 /* return some buffers to hardware, one at a time is too slow */ 8347 if (cleaned_count >= IGB_RX_BUFFER_WRITE) { 8348 igb_alloc_rx_buffers(rx_ring, cleaned_count); 8349 cleaned_count = 0; 8350 } 8351 8352 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean); 8353 size = le16_to_cpu(rx_desc->wb.upper.length); 8354 if (!size) 8355 break; 8356 8357 /* This memory barrier is needed to keep us from reading 8358 * any other fields out of the rx_desc until we know the 8359 * descriptor has been written back 8360 */ 8361 dma_rmb(); 8362 8363 rx_buffer = igb_get_rx_buffer(rx_ring, size); 8364 8365 /* retrieve a buffer from the ring */ 8366 if (skb) 8367 igb_add_rx_frag(rx_ring, rx_buffer, skb, size); 8368 else if (ring_uses_build_skb(rx_ring)) 8369 skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size); 8370 else 8371 skb = igb_construct_skb(rx_ring, rx_buffer, 8372 rx_desc, size); 8373 8374 /* exit if we failed to retrieve a buffer */ 8375 if (!skb) { 8376 rx_ring->rx_stats.alloc_failed++; 8377 rx_buffer->pagecnt_bias++; 8378 break; 8379 } 8380 8381 igb_put_rx_buffer(rx_ring, rx_buffer); 8382 cleaned_count++; 8383 8384 /* fetch next buffer in frame if non-eop */ 8385 if (igb_is_non_eop(rx_ring, rx_desc)) 8386 continue; 8387 8388 /* verify the packet layout is correct */ 8389 if (igb_cleanup_headers(rx_ring, rx_desc, skb)) { 8390 skb = NULL; 8391 continue; 8392 } 8393 8394 /* probably a little skewed due to removing CRC */ 8395 total_bytes += skb->len; 8396 8397 /* populate checksum, timestamp, VLAN, and protocol */ 8398 igb_process_skb_fields(rx_ring, rx_desc, skb); 8399 8400 napi_gro_receive(&q_vector->napi, skb); 8401 8402 /* reset skb pointer */ 8403 skb = NULL; 8404 8405 /* update budget accounting */ 8406 total_packets++; 8407 } 8408 8409 /* place incomplete frames back on ring for completion */ 8410 rx_ring->skb = skb; 8411 8412 u64_stats_update_begin(&rx_ring->rx_syncp); 8413 rx_ring->rx_stats.packets += total_packets; 8414 rx_ring->rx_stats.bytes += total_bytes; 8415 u64_stats_update_end(&rx_ring->rx_syncp); 8416 q_vector->rx.total_packets += total_packets; 8417 q_vector->rx.total_bytes += total_bytes; 8418 8419 if (cleaned_count) 8420 igb_alloc_rx_buffers(rx_ring, cleaned_count); 8421 8422 return total_packets; 8423 } 8424 8425 static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring) 8426 { 8427 return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0; 8428 } 8429 8430 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, 8431 struct igb_rx_buffer *bi) 8432 { 8433 struct page *page = bi->page; 8434 dma_addr_t dma; 8435 8436 /* since we are recycling buffers we should seldom need to alloc */ 8437 if (likely(page)) 8438 return true; 8439 8440 /* alloc new page for storage */ 8441 page = dev_alloc_pages(igb_rx_pg_order(rx_ring)); 8442 if (unlikely(!page)) { 8443 rx_ring->rx_stats.alloc_failed++; 8444 return false; 8445 } 8446 8447 /* map page for use */ 8448 dma = dma_map_page_attrs(rx_ring->dev, page, 0, 8449 igb_rx_pg_size(rx_ring), 8450 DMA_FROM_DEVICE, 8451 IGB_RX_DMA_ATTR); 8452 8453 /* if mapping failed free memory back to system since 8454 * there isn't much point in holding memory we can't use 8455 */ 8456 if (dma_mapping_error(rx_ring->dev, dma)) { 8457 __free_pages(page, igb_rx_pg_order(rx_ring)); 8458 8459 rx_ring->rx_stats.alloc_failed++; 8460 return false; 8461 } 8462 8463 bi->dma = dma; 8464 bi->page = page; 8465 bi->page_offset = igb_rx_offset(rx_ring); 8466 bi->pagecnt_bias = 1; 8467 8468 return true; 8469 } 8470 8471 /** 8472 * igb_alloc_rx_buffers - Replace used receive buffers; packet split 8473 * @adapter: address of board private structure 8474 **/ 8475 void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) 8476 { 8477 union e1000_adv_rx_desc *rx_desc; 8478 struct igb_rx_buffer *bi; 8479 u16 i = rx_ring->next_to_use; 8480 u16 bufsz; 8481 8482 /* nothing to do */ 8483 if (!cleaned_count) 8484 return; 8485 8486 rx_desc = IGB_RX_DESC(rx_ring, i); 8487 bi = &rx_ring->rx_buffer_info[i]; 8488 i -= rx_ring->count; 8489 8490 bufsz = igb_rx_bufsz(rx_ring); 8491 8492 do { 8493 if (!igb_alloc_mapped_page(rx_ring, bi)) 8494 break; 8495 8496 /* sync the buffer for use by the device */ 8497 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 8498 bi->page_offset, bufsz, 8499 DMA_FROM_DEVICE); 8500 8501 /* Refresh the desc even if buffer_addrs didn't change 8502 * because each write-back erases this info. 8503 */ 8504 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); 8505 8506 rx_desc++; 8507 bi++; 8508 i++; 8509 if (unlikely(!i)) { 8510 rx_desc = IGB_RX_DESC(rx_ring, 0); 8511 bi = rx_ring->rx_buffer_info; 8512 i -= rx_ring->count; 8513 } 8514 8515 /* clear the length for the next_to_use descriptor */ 8516 rx_desc->wb.upper.length = 0; 8517 8518 cleaned_count--; 8519 } while (cleaned_count); 8520 8521 i += rx_ring->count; 8522 8523 if (rx_ring->next_to_use != i) { 8524 /* record the next descriptor to use */ 8525 rx_ring->next_to_use = i; 8526 8527 /* update next to alloc since we have filled the ring */ 8528 rx_ring->next_to_alloc = i; 8529 8530 /* Force memory writes to complete before letting h/w 8531 * know there are new descriptors to fetch. (Only 8532 * applicable for weak-ordered memory model archs, 8533 * such as IA-64). 8534 */ 8535 dma_wmb(); 8536 writel(i, rx_ring->tail); 8537 } 8538 } 8539 8540 /** 8541 * igb_mii_ioctl - 8542 * @netdev: 8543 * @ifreq: 8544 * @cmd: 8545 **/ 8546 static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 8547 { 8548 struct igb_adapter *adapter = netdev_priv(netdev); 8549 struct mii_ioctl_data *data = if_mii(ifr); 8550 8551 if (adapter->hw.phy.media_type != e1000_media_type_copper) 8552 return -EOPNOTSUPP; 8553 8554 switch (cmd) { 8555 case SIOCGMIIPHY: 8556 data->phy_id = adapter->hw.phy.addr; 8557 break; 8558 case SIOCGMIIREG: 8559 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, 8560 &data->val_out)) 8561 return -EIO; 8562 break; 8563 case SIOCSMIIREG: 8564 default: 8565 return -EOPNOTSUPP; 8566 } 8567 return 0; 8568 } 8569 8570 /** 8571 * igb_ioctl - 8572 * @netdev: 8573 * @ifreq: 8574 * @cmd: 8575 **/ 8576 static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 8577 { 8578 switch (cmd) { 8579 case SIOCGMIIPHY: 8580 case SIOCGMIIREG: 8581 case SIOCSMIIREG: 8582 return igb_mii_ioctl(netdev, ifr, cmd); 8583 case SIOCGHWTSTAMP: 8584 return igb_ptp_get_ts_config(netdev, ifr); 8585 case SIOCSHWTSTAMP: 8586 return igb_ptp_set_ts_config(netdev, ifr); 8587 default: 8588 return -EOPNOTSUPP; 8589 } 8590 } 8591 8592 void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) 8593 { 8594 struct igb_adapter *adapter = hw->back; 8595 8596 pci_read_config_word(adapter->pdev, reg, value); 8597 } 8598 8599 void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) 8600 { 8601 struct igb_adapter *adapter = hw->back; 8602 8603 pci_write_config_word(adapter->pdev, reg, *value); 8604 } 8605 8606 s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) 8607 { 8608 struct igb_adapter *adapter = hw->back; 8609 8610 if (pcie_capability_read_word(adapter->pdev, reg, value)) 8611 return -E1000_ERR_CONFIG; 8612 8613 return 0; 8614 } 8615 8616 s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) 8617 { 8618 struct igb_adapter *adapter = hw->back; 8619 8620 if (pcie_capability_write_word(adapter->pdev, reg, *value)) 8621 return -E1000_ERR_CONFIG; 8622 8623 return 0; 8624 } 8625 8626 static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features) 8627 { 8628 struct igb_adapter *adapter = netdev_priv(netdev); 8629 struct e1000_hw *hw = &adapter->hw; 8630 u32 ctrl, rctl; 8631 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); 8632 8633 if (enable) { 8634 /* enable VLAN tag insert/strip */ 8635 ctrl = rd32(E1000_CTRL); 8636 ctrl |= E1000_CTRL_VME; 8637 wr32(E1000_CTRL, ctrl); 8638 8639 /* Disable CFI check */ 8640 rctl = rd32(E1000_RCTL); 8641 rctl &= ~E1000_RCTL_CFIEN; 8642 wr32(E1000_RCTL, rctl); 8643 } else { 8644 /* disable VLAN tag insert/strip */ 8645 ctrl = rd32(E1000_CTRL); 8646 ctrl &= ~E1000_CTRL_VME; 8647 wr32(E1000_CTRL, ctrl); 8648 } 8649 8650 igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable); 8651 } 8652 8653 static int igb_vlan_rx_add_vid(struct net_device *netdev, 8654 __be16 proto, u16 vid) 8655 { 8656 struct igb_adapter *adapter = netdev_priv(netdev); 8657 struct e1000_hw *hw = &adapter->hw; 8658 int pf_id = adapter->vfs_allocated_count; 8659 8660 /* add the filter since PF can receive vlans w/o entry in vlvf */ 8661 if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) 8662 igb_vfta_set(hw, vid, pf_id, true, !!vid); 8663 8664 set_bit(vid, adapter->active_vlans); 8665 8666 return 0; 8667 } 8668 8669 static int igb_vlan_rx_kill_vid(struct net_device *netdev, 8670 __be16 proto, u16 vid) 8671 { 8672 struct igb_adapter *adapter = netdev_priv(netdev); 8673 int pf_id = adapter->vfs_allocated_count; 8674 struct e1000_hw *hw = &adapter->hw; 8675 8676 /* remove VID from filter table */ 8677 if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) 8678 igb_vfta_set(hw, vid, pf_id, false, true); 8679 8680 clear_bit(vid, adapter->active_vlans); 8681 8682 return 0; 8683 } 8684 8685 static void igb_restore_vlan(struct igb_adapter *adapter) 8686 { 8687 u16 vid = 1; 8688 8689 igb_vlan_mode(adapter->netdev, adapter->netdev->features); 8690 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); 8691 8692 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) 8693 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); 8694 } 8695 8696 int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) 8697 { 8698 struct pci_dev *pdev = adapter->pdev; 8699 struct e1000_mac_info *mac = &adapter->hw.mac; 8700 8701 mac->autoneg = 0; 8702 8703 /* Make sure dplx is at most 1 bit and lsb of speed is not set 8704 * for the switch() below to work 8705 */ 8706 if ((spd & 1) || (dplx & ~1)) 8707 goto err_inval; 8708 8709 /* Fiber NIC's only allow 1000 gbps Full duplex 8710 * and 100Mbps Full duplex for 100baseFx sfp 8711 */ 8712 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { 8713 switch (spd + dplx) { 8714 case SPEED_10 + DUPLEX_HALF: 8715 case SPEED_10 + DUPLEX_FULL: 8716 case SPEED_100 + DUPLEX_HALF: 8717 goto err_inval; 8718 default: 8719 break; 8720 } 8721 } 8722 8723 switch (spd + dplx) { 8724 case SPEED_10 + DUPLEX_HALF: 8725 mac->forced_speed_duplex = ADVERTISE_10_HALF; 8726 break; 8727 case SPEED_10 + DUPLEX_FULL: 8728 mac->forced_speed_duplex = ADVERTISE_10_FULL; 8729 break; 8730 case SPEED_100 + DUPLEX_HALF: 8731 mac->forced_speed_duplex = ADVERTISE_100_HALF; 8732 break; 8733 case SPEED_100 + DUPLEX_FULL: 8734 mac->forced_speed_duplex = ADVERTISE_100_FULL; 8735 break; 8736 case SPEED_1000 + DUPLEX_FULL: 8737 mac->autoneg = 1; 8738 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 8739 break; 8740 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 8741 default: 8742 goto err_inval; 8743 } 8744 8745 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ 8746 adapter->hw.phy.mdix = AUTO_ALL_MODES; 8747 8748 return 0; 8749 8750 err_inval: 8751 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); 8752 return -EINVAL; 8753 } 8754 8755 static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, 8756 bool runtime) 8757 { 8758 struct net_device *netdev = pci_get_drvdata(pdev); 8759 struct igb_adapter *adapter = netdev_priv(netdev); 8760 struct e1000_hw *hw = &adapter->hw; 8761 u32 ctrl, rctl, status; 8762 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; 8763 #ifdef CONFIG_PM 8764 int retval = 0; 8765 #endif 8766 8767 rtnl_lock(); 8768 netif_device_detach(netdev); 8769 8770 if (netif_running(netdev)) 8771 __igb_close(netdev, true); 8772 8773 igb_ptp_suspend(adapter); 8774 8775 igb_clear_interrupt_scheme(adapter); 8776 rtnl_unlock(); 8777 8778 #ifdef CONFIG_PM 8779 retval = pci_save_state(pdev); 8780 if (retval) 8781 return retval; 8782 #endif 8783 8784 status = rd32(E1000_STATUS); 8785 if (status & E1000_STATUS_LU) 8786 wufc &= ~E1000_WUFC_LNKC; 8787 8788 if (wufc) { 8789 igb_setup_rctl(adapter); 8790 igb_set_rx_mode(netdev); 8791 8792 /* turn on all-multi mode if wake on multicast is enabled */ 8793 if (wufc & E1000_WUFC_MC) { 8794 rctl = rd32(E1000_RCTL); 8795 rctl |= E1000_RCTL_MPE; 8796 wr32(E1000_RCTL, rctl); 8797 } 8798 8799 ctrl = rd32(E1000_CTRL); 8800 /* advertise wake from D3Cold */ 8801 #define E1000_CTRL_ADVD3WUC 0x00100000 8802 /* phy power management enable */ 8803 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 8804 ctrl |= E1000_CTRL_ADVD3WUC; 8805 wr32(E1000_CTRL, ctrl); 8806 8807 /* Allow time for pending master requests to run */ 8808 igb_disable_pcie_master(hw); 8809 8810 wr32(E1000_WUC, E1000_WUC_PME_EN); 8811 wr32(E1000_WUFC, wufc); 8812 } else { 8813 wr32(E1000_WUC, 0); 8814 wr32(E1000_WUFC, 0); 8815 } 8816 8817 *enable_wake = wufc || adapter->en_mng_pt; 8818 if (!*enable_wake) 8819 igb_power_down_link(adapter); 8820 else 8821 igb_power_up_link(adapter); 8822 8823 /* Release control of h/w to f/w. If f/w is AMT enabled, this 8824 * would have already happened in close and is redundant. 8825 */ 8826 igb_release_hw_control(adapter); 8827 8828 pci_disable_device(pdev); 8829 8830 return 0; 8831 } 8832 8833 static void igb_deliver_wake_packet(struct net_device *netdev) 8834 { 8835 struct igb_adapter *adapter = netdev_priv(netdev); 8836 struct e1000_hw *hw = &adapter->hw; 8837 struct sk_buff *skb; 8838 u32 wupl; 8839 8840 wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK; 8841 8842 /* WUPM stores only the first 128 bytes of the wake packet. 8843 * Read the packet only if we have the whole thing. 8844 */ 8845 if ((wupl == 0) || (wupl > E1000_WUPM_BYTES)) 8846 return; 8847 8848 skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES); 8849 if (!skb) 8850 return; 8851 8852 skb_put(skb, wupl); 8853 8854 /* Ensure reads are 32-bit aligned */ 8855 wupl = roundup(wupl, 4); 8856 8857 memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl); 8858 8859 skb->protocol = eth_type_trans(skb, netdev); 8860 netif_rx(skb); 8861 } 8862 8863 static int __maybe_unused igb_suspend(struct device *dev) 8864 { 8865 int retval; 8866 bool wake; 8867 struct pci_dev *pdev = to_pci_dev(dev); 8868 8869 retval = __igb_shutdown(pdev, &wake, 0); 8870 if (retval) 8871 return retval; 8872 8873 if (wake) { 8874 pci_prepare_to_sleep(pdev); 8875 } else { 8876 pci_wake_from_d3(pdev, false); 8877 pci_set_power_state(pdev, PCI_D3hot); 8878 } 8879 8880 return 0; 8881 } 8882 8883 static int __maybe_unused igb_resume(struct device *dev) 8884 { 8885 struct pci_dev *pdev = to_pci_dev(dev); 8886 struct net_device *netdev = pci_get_drvdata(pdev); 8887 struct igb_adapter *adapter = netdev_priv(netdev); 8888 struct e1000_hw *hw = &adapter->hw; 8889 u32 err, val; 8890 8891 pci_set_power_state(pdev, PCI_D0); 8892 pci_restore_state(pdev); 8893 pci_save_state(pdev); 8894 8895 if (!pci_device_is_present(pdev)) 8896 return -ENODEV; 8897 err = pci_enable_device_mem(pdev); 8898 if (err) { 8899 dev_err(&pdev->dev, 8900 "igb: Cannot enable PCI device from suspend\n"); 8901 return err; 8902 } 8903 pci_set_master(pdev); 8904 8905 pci_enable_wake(pdev, PCI_D3hot, 0); 8906 pci_enable_wake(pdev, PCI_D3cold, 0); 8907 8908 if (igb_init_interrupt_scheme(adapter, true)) { 8909 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 8910 return -ENOMEM; 8911 } 8912 8913 igb_reset(adapter); 8914 8915 /* let the f/w know that the h/w is now under the control of the 8916 * driver. 8917 */ 8918 igb_get_hw_control(adapter); 8919 8920 val = rd32(E1000_WUS); 8921 if (val & WAKE_PKT_WUS) 8922 igb_deliver_wake_packet(netdev); 8923 8924 wr32(E1000_WUS, ~0); 8925 8926 rtnl_lock(); 8927 if (!err && netif_running(netdev)) 8928 err = __igb_open(netdev, true); 8929 8930 if (!err) 8931 netif_device_attach(netdev); 8932 rtnl_unlock(); 8933 8934 return err; 8935 } 8936 8937 static int __maybe_unused igb_runtime_idle(struct device *dev) 8938 { 8939 struct pci_dev *pdev = to_pci_dev(dev); 8940 struct net_device *netdev = pci_get_drvdata(pdev); 8941 struct igb_adapter *adapter = netdev_priv(netdev); 8942 8943 if (!igb_has_link(adapter)) 8944 pm_schedule_suspend(dev, MSEC_PER_SEC * 5); 8945 8946 return -EBUSY; 8947 } 8948 8949 static int __maybe_unused igb_runtime_suspend(struct device *dev) 8950 { 8951 struct pci_dev *pdev = to_pci_dev(dev); 8952 int retval; 8953 bool wake; 8954 8955 retval = __igb_shutdown(pdev, &wake, 1); 8956 if (retval) 8957 return retval; 8958 8959 if (wake) { 8960 pci_prepare_to_sleep(pdev); 8961 } else { 8962 pci_wake_from_d3(pdev, false); 8963 pci_set_power_state(pdev, PCI_D3hot); 8964 } 8965 8966 return 0; 8967 } 8968 8969 static int __maybe_unused igb_runtime_resume(struct device *dev) 8970 { 8971 return igb_resume(dev); 8972 } 8973 8974 static void igb_shutdown(struct pci_dev *pdev) 8975 { 8976 bool wake; 8977 8978 __igb_shutdown(pdev, &wake, 0); 8979 8980 if (system_state == SYSTEM_POWER_OFF) { 8981 pci_wake_from_d3(pdev, wake); 8982 pci_set_power_state(pdev, PCI_D3hot); 8983 } 8984 } 8985 8986 #ifdef CONFIG_PCI_IOV 8987 static int igb_sriov_reinit(struct pci_dev *dev) 8988 { 8989 struct net_device *netdev = pci_get_drvdata(dev); 8990 struct igb_adapter *adapter = netdev_priv(netdev); 8991 struct pci_dev *pdev = adapter->pdev; 8992 8993 rtnl_lock(); 8994 8995 if (netif_running(netdev)) 8996 igb_close(netdev); 8997 else 8998 igb_reset(adapter); 8999 9000 igb_clear_interrupt_scheme(adapter); 9001 9002 igb_init_queue_configuration(adapter); 9003 9004 if (igb_init_interrupt_scheme(adapter, true)) { 9005 rtnl_unlock(); 9006 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 9007 return -ENOMEM; 9008 } 9009 9010 if (netif_running(netdev)) 9011 igb_open(netdev); 9012 9013 rtnl_unlock(); 9014 9015 return 0; 9016 } 9017 9018 static int igb_pci_disable_sriov(struct pci_dev *dev) 9019 { 9020 int err = igb_disable_sriov(dev); 9021 9022 if (!err) 9023 err = igb_sriov_reinit(dev); 9024 9025 return err; 9026 } 9027 9028 static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs) 9029 { 9030 int err = igb_enable_sriov(dev, num_vfs); 9031 9032 if (err) 9033 goto out; 9034 9035 err = igb_sriov_reinit(dev); 9036 if (!err) 9037 return num_vfs; 9038 9039 out: 9040 return err; 9041 } 9042 9043 #endif 9044 static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) 9045 { 9046 #ifdef CONFIG_PCI_IOV 9047 if (num_vfs == 0) 9048 return igb_pci_disable_sriov(dev); 9049 else 9050 return igb_pci_enable_sriov(dev, num_vfs); 9051 #endif 9052 return 0; 9053 } 9054 9055 #ifdef CONFIG_NET_POLL_CONTROLLER 9056 /* Polling 'interrupt' - used by things like netconsole to send skbs 9057 * without having to re-enable interrupts. It's not called while 9058 * the interrupt routine is executing. 9059 */ 9060 static void igb_netpoll(struct net_device *netdev) 9061 { 9062 struct igb_adapter *adapter = netdev_priv(netdev); 9063 struct e1000_hw *hw = &adapter->hw; 9064 struct igb_q_vector *q_vector; 9065 int i; 9066 9067 for (i = 0; i < adapter->num_q_vectors; i++) { 9068 q_vector = adapter->q_vector[i]; 9069 if (adapter->flags & IGB_FLAG_HAS_MSIX) 9070 wr32(E1000_EIMC, q_vector->eims_value); 9071 else 9072 igb_irq_disable(adapter); 9073 napi_schedule(&q_vector->napi); 9074 } 9075 } 9076 #endif /* CONFIG_NET_POLL_CONTROLLER */ 9077 9078 /** 9079 * igb_io_error_detected - called when PCI error is detected 9080 * @pdev: Pointer to PCI device 9081 * @state: The current pci connection state 9082 * 9083 * This function is called after a PCI bus error affecting 9084 * this device has been detected. 9085 **/ 9086 static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, 9087 pci_channel_state_t state) 9088 { 9089 struct net_device *netdev = pci_get_drvdata(pdev); 9090 struct igb_adapter *adapter = netdev_priv(netdev); 9091 9092 netif_device_detach(netdev); 9093 9094 if (state == pci_channel_io_perm_failure) 9095 return PCI_ERS_RESULT_DISCONNECT; 9096 9097 if (netif_running(netdev)) 9098 igb_down(adapter); 9099 pci_disable_device(pdev); 9100 9101 /* Request a slot slot reset. */ 9102 return PCI_ERS_RESULT_NEED_RESET; 9103 } 9104 9105 /** 9106 * igb_io_slot_reset - called after the pci bus has been reset. 9107 * @pdev: Pointer to PCI device 9108 * 9109 * Restart the card from scratch, as if from a cold-boot. Implementation 9110 * resembles the first-half of the igb_resume routine. 9111 **/ 9112 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) 9113 { 9114 struct net_device *netdev = pci_get_drvdata(pdev); 9115 struct igb_adapter *adapter = netdev_priv(netdev); 9116 struct e1000_hw *hw = &adapter->hw; 9117 pci_ers_result_t result; 9118 int err; 9119 9120 if (pci_enable_device_mem(pdev)) { 9121 dev_err(&pdev->dev, 9122 "Cannot re-enable PCI device after reset.\n"); 9123 result = PCI_ERS_RESULT_DISCONNECT; 9124 } else { 9125 pci_set_master(pdev); 9126 pci_restore_state(pdev); 9127 pci_save_state(pdev); 9128 9129 pci_enable_wake(pdev, PCI_D3hot, 0); 9130 pci_enable_wake(pdev, PCI_D3cold, 0); 9131 9132 /* In case of PCI error, adapter lose its HW address 9133 * so we should re-assign it here. 9134 */ 9135 hw->hw_addr = adapter->io_addr; 9136 9137 igb_reset(adapter); 9138 wr32(E1000_WUS, ~0); 9139 result = PCI_ERS_RESULT_RECOVERED; 9140 } 9141 9142 err = pci_cleanup_aer_uncorrect_error_status(pdev); 9143 if (err) { 9144 dev_err(&pdev->dev, 9145 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", 9146 err); 9147 /* non-fatal, continue */ 9148 } 9149 9150 return result; 9151 } 9152 9153 /** 9154 * igb_io_resume - called when traffic can start flowing again. 9155 * @pdev: Pointer to PCI device 9156 * 9157 * This callback is called when the error recovery driver tells us that 9158 * its OK to resume normal operation. Implementation resembles the 9159 * second-half of the igb_resume routine. 9160 */ 9161 static void igb_io_resume(struct pci_dev *pdev) 9162 { 9163 struct net_device *netdev = pci_get_drvdata(pdev); 9164 struct igb_adapter *adapter = netdev_priv(netdev); 9165 9166 if (netif_running(netdev)) { 9167 if (igb_up(adapter)) { 9168 dev_err(&pdev->dev, "igb_up failed after reset\n"); 9169 return; 9170 } 9171 } 9172 9173 netif_device_attach(netdev); 9174 9175 /* let the f/w know that the h/w is now under the control of the 9176 * driver. 9177 */ 9178 igb_get_hw_control(adapter); 9179 } 9180 9181 /** 9182 * igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table 9183 * @adapter: Pointer to adapter structure 9184 * @index: Index of the RAR entry which need to be synced with MAC table 9185 **/ 9186 static void igb_rar_set_index(struct igb_adapter *adapter, u32 index) 9187 { 9188 struct e1000_hw *hw = &adapter->hw; 9189 u32 rar_low, rar_high; 9190 u8 *addr = adapter->mac_table[index].addr; 9191 9192 /* HW expects these to be in network order when they are plugged 9193 * into the registers which are little endian. In order to guarantee 9194 * that ordering we need to do an leXX_to_cpup here in order to be 9195 * ready for the byteswap that occurs with writel 9196 */ 9197 rar_low = le32_to_cpup((__le32 *)(addr)); 9198 rar_high = le16_to_cpup((__le16 *)(addr + 4)); 9199 9200 /* Indicate to hardware the Address is Valid. */ 9201 if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) { 9202 if (is_valid_ether_addr(addr)) 9203 rar_high |= E1000_RAH_AV; 9204 9205 if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR) 9206 rar_high |= E1000_RAH_ASEL_SRC_ADDR; 9207 9208 switch (hw->mac.type) { 9209 case e1000_82575: 9210 case e1000_i210: 9211 if (adapter->mac_table[index].state & 9212 IGB_MAC_STATE_QUEUE_STEERING) 9213 rar_high |= E1000_RAH_QSEL_ENABLE; 9214 9215 rar_high |= E1000_RAH_POOL_1 * 9216 adapter->mac_table[index].queue; 9217 break; 9218 default: 9219 rar_high |= E1000_RAH_POOL_1 << 9220 adapter->mac_table[index].queue; 9221 break; 9222 } 9223 } 9224 9225 wr32(E1000_RAL(index), rar_low); 9226 wrfl(); 9227 wr32(E1000_RAH(index), rar_high); 9228 wrfl(); 9229 } 9230 9231 static int igb_set_vf_mac(struct igb_adapter *adapter, 9232 int vf, unsigned char *mac_addr) 9233 { 9234 struct e1000_hw *hw = &adapter->hw; 9235 /* VF MAC addresses start at end of receive addresses and moves 9236 * towards the first, as a result a collision should not be possible 9237 */ 9238 int rar_entry = hw->mac.rar_entry_count - (vf + 1); 9239 unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses; 9240 9241 ether_addr_copy(vf_mac_addr, mac_addr); 9242 ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr); 9243 adapter->mac_table[rar_entry].queue = vf; 9244 adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE; 9245 igb_rar_set_index(adapter, rar_entry); 9246 9247 return 0; 9248 } 9249 9250 static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) 9251 { 9252 struct igb_adapter *adapter = netdev_priv(netdev); 9253 9254 if (vf >= adapter->vfs_allocated_count) 9255 return -EINVAL; 9256 9257 /* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC 9258 * flag and allows to overwrite the MAC via VF netdev. This 9259 * is necessary to allow libvirt a way to restore the original 9260 * MAC after unbinding vfio-pci and reloading igbvf after shutting 9261 * down a VM. 9262 */ 9263 if (is_zero_ether_addr(mac)) { 9264 adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC; 9265 dev_info(&adapter->pdev->dev, 9266 "remove administratively set MAC on VF %d\n", 9267 vf); 9268 } else if (is_valid_ether_addr(mac)) { 9269 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; 9270 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", 9271 mac, vf); 9272 dev_info(&adapter->pdev->dev, 9273 "Reload the VF driver to make this change effective."); 9274 /* Generate additional warning if PF is down */ 9275 if (test_bit(__IGB_DOWN, &adapter->state)) { 9276 dev_warn(&adapter->pdev->dev, 9277 "The VF MAC address has been set, but the PF device is not up.\n"); 9278 dev_warn(&adapter->pdev->dev, 9279 "Bring the PF device up before attempting to use the VF device.\n"); 9280 } 9281 } else { 9282 return -EINVAL; 9283 } 9284 return igb_set_vf_mac(adapter, vf, mac); 9285 } 9286 9287 static int igb_link_mbps(int internal_link_speed) 9288 { 9289 switch (internal_link_speed) { 9290 case SPEED_100: 9291 return 100; 9292 case SPEED_1000: 9293 return 1000; 9294 default: 9295 return 0; 9296 } 9297 } 9298 9299 static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, 9300 int link_speed) 9301 { 9302 int rf_dec, rf_int; 9303 u32 bcnrc_val; 9304 9305 if (tx_rate != 0) { 9306 /* Calculate the rate factor values to set */ 9307 rf_int = link_speed / tx_rate; 9308 rf_dec = (link_speed - (rf_int * tx_rate)); 9309 rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) / 9310 tx_rate; 9311 9312 bcnrc_val = E1000_RTTBCNRC_RS_ENA; 9313 bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) & 9314 E1000_RTTBCNRC_RF_INT_MASK); 9315 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK); 9316 } else { 9317 bcnrc_val = 0; 9318 } 9319 9320 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ 9321 /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM 9322 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported. 9323 */ 9324 wr32(E1000_RTTBCNRM, 0x14); 9325 wr32(E1000_RTTBCNRC, bcnrc_val); 9326 } 9327 9328 static void igb_check_vf_rate_limit(struct igb_adapter *adapter) 9329 { 9330 int actual_link_speed, i; 9331 bool reset_rate = false; 9332 9333 /* VF TX rate limit was not set or not supported */ 9334 if ((adapter->vf_rate_link_speed == 0) || 9335 (adapter->hw.mac.type != e1000_82576)) 9336 return; 9337 9338 actual_link_speed = igb_link_mbps(adapter->link_speed); 9339 if (actual_link_speed != adapter->vf_rate_link_speed) { 9340 reset_rate = true; 9341 adapter->vf_rate_link_speed = 0; 9342 dev_info(&adapter->pdev->dev, 9343 "Link speed has been changed. VF Transmit rate is disabled\n"); 9344 } 9345 9346 for (i = 0; i < adapter->vfs_allocated_count; i++) { 9347 if (reset_rate) 9348 adapter->vf_data[i].tx_rate = 0; 9349 9350 igb_set_vf_rate_limit(&adapter->hw, i, 9351 adapter->vf_data[i].tx_rate, 9352 actual_link_speed); 9353 } 9354 } 9355 9356 static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, 9357 int min_tx_rate, int max_tx_rate) 9358 { 9359 struct igb_adapter *adapter = netdev_priv(netdev); 9360 struct e1000_hw *hw = &adapter->hw; 9361 int actual_link_speed; 9362 9363 if (hw->mac.type != e1000_82576) 9364 return -EOPNOTSUPP; 9365 9366 if (min_tx_rate) 9367 return -EINVAL; 9368 9369 actual_link_speed = igb_link_mbps(adapter->link_speed); 9370 if ((vf >= adapter->vfs_allocated_count) || 9371 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) || 9372 (max_tx_rate < 0) || 9373 (max_tx_rate > actual_link_speed)) 9374 return -EINVAL; 9375 9376 adapter->vf_rate_link_speed = actual_link_speed; 9377 adapter->vf_data[vf].tx_rate = (u16)max_tx_rate; 9378 igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed); 9379 9380 return 0; 9381 } 9382 9383 static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, 9384 bool setting) 9385 { 9386 struct igb_adapter *adapter = netdev_priv(netdev); 9387 struct e1000_hw *hw = &adapter->hw; 9388 u32 reg_val, reg_offset; 9389 9390 if (!adapter->vfs_allocated_count) 9391 return -EOPNOTSUPP; 9392 9393 if (vf >= adapter->vfs_allocated_count) 9394 return -EINVAL; 9395 9396 reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC; 9397 reg_val = rd32(reg_offset); 9398 if (setting) 9399 reg_val |= (BIT(vf) | 9400 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); 9401 else 9402 reg_val &= ~(BIT(vf) | 9403 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); 9404 wr32(reg_offset, reg_val); 9405 9406 adapter->vf_data[vf].spoofchk_enabled = setting; 9407 return 0; 9408 } 9409 9410 static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) 9411 { 9412 struct igb_adapter *adapter = netdev_priv(netdev); 9413 9414 if (vf >= adapter->vfs_allocated_count) 9415 return -EINVAL; 9416 if (adapter->vf_data[vf].trusted == setting) 9417 return 0; 9418 9419 adapter->vf_data[vf].trusted = setting; 9420 9421 dev_info(&adapter->pdev->dev, "VF %u is %strusted\n", 9422 vf, setting ? "" : "not "); 9423 return 0; 9424 } 9425 9426 static int igb_ndo_get_vf_config(struct net_device *netdev, 9427 int vf, struct ifla_vf_info *ivi) 9428 { 9429 struct igb_adapter *adapter = netdev_priv(netdev); 9430 if (vf >= adapter->vfs_allocated_count) 9431 return -EINVAL; 9432 ivi->vf = vf; 9433 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); 9434 ivi->max_tx_rate = adapter->vf_data[vf].tx_rate; 9435 ivi->min_tx_rate = 0; 9436 ivi->vlan = adapter->vf_data[vf].pf_vlan; 9437 ivi->qos = adapter->vf_data[vf].pf_qos; 9438 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled; 9439 ivi->trusted = adapter->vf_data[vf].trusted; 9440 return 0; 9441 } 9442 9443 static void igb_vmm_control(struct igb_adapter *adapter) 9444 { 9445 struct e1000_hw *hw = &adapter->hw; 9446 u32 reg; 9447 9448 switch (hw->mac.type) { 9449 case e1000_82575: 9450 case e1000_i210: 9451 case e1000_i211: 9452 case e1000_i354: 9453 default: 9454 /* replication is not supported for 82575 */ 9455 return; 9456 case e1000_82576: 9457 /* notify HW that the MAC is adding vlan tags */ 9458 reg = rd32(E1000_DTXCTL); 9459 reg |= E1000_DTXCTL_VLAN_ADDED; 9460 wr32(E1000_DTXCTL, reg); 9461 /* Fall through */ 9462 case e1000_82580: 9463 /* enable replication vlan tag stripping */ 9464 reg = rd32(E1000_RPLOLR); 9465 reg |= E1000_RPLOLR_STRVLAN; 9466 wr32(E1000_RPLOLR, reg); 9467 /* Fall through */ 9468 case e1000_i350: 9469 /* none of the above registers are supported by i350 */ 9470 break; 9471 } 9472 9473 if (adapter->vfs_allocated_count) { 9474 igb_vmdq_set_loopback_pf(hw, true); 9475 igb_vmdq_set_replication_pf(hw, true); 9476 igb_vmdq_set_anti_spoofing_pf(hw, true, 9477 adapter->vfs_allocated_count); 9478 } else { 9479 igb_vmdq_set_loopback_pf(hw, false); 9480 igb_vmdq_set_replication_pf(hw, false); 9481 } 9482 } 9483 9484 static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) 9485 { 9486 struct e1000_hw *hw = &adapter->hw; 9487 u32 dmac_thr; 9488 u16 hwm; 9489 9490 if (hw->mac.type > e1000_82580) { 9491 if (adapter->flags & IGB_FLAG_DMAC) { 9492 u32 reg; 9493 9494 /* force threshold to 0. */ 9495 wr32(E1000_DMCTXTH, 0); 9496 9497 /* DMA Coalescing high water mark needs to be greater 9498 * than the Rx threshold. Set hwm to PBA - max frame 9499 * size in 16B units, capping it at PBA - 6KB. 9500 */ 9501 hwm = 64 * (pba - 6); 9502 reg = rd32(E1000_FCRTC); 9503 reg &= ~E1000_FCRTC_RTH_COAL_MASK; 9504 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) 9505 & E1000_FCRTC_RTH_COAL_MASK); 9506 wr32(E1000_FCRTC, reg); 9507 9508 /* Set the DMA Coalescing Rx threshold to PBA - 2 * max 9509 * frame size, capping it at PBA - 10KB. 9510 */ 9511 dmac_thr = pba - 10; 9512 reg = rd32(E1000_DMACR); 9513 reg &= ~E1000_DMACR_DMACTHR_MASK; 9514 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) 9515 & E1000_DMACR_DMACTHR_MASK); 9516 9517 /* transition to L0x or L1 if available..*/ 9518 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); 9519 9520 /* watchdog timer= +-1000 usec in 32usec intervals */ 9521 reg |= (1000 >> 5); 9522 9523 /* Disable BMC-to-OS Watchdog Enable */ 9524 if (hw->mac.type != e1000_i354) 9525 reg &= ~E1000_DMACR_DC_BMC2OSW_EN; 9526 9527 wr32(E1000_DMACR, reg); 9528 9529 /* no lower threshold to disable 9530 * coalescing(smart fifb)-UTRESH=0 9531 */ 9532 wr32(E1000_DMCRTRH, 0); 9533 9534 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4); 9535 9536 wr32(E1000_DMCTLX, reg); 9537 9538 /* free space in tx packet buffer to wake from 9539 * DMA coal 9540 */ 9541 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - 9542 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); 9543 9544 /* make low power state decision controlled 9545 * by DMA coal 9546 */ 9547 reg = rd32(E1000_PCIEMISC); 9548 reg &= ~E1000_PCIEMISC_LX_DECISION; 9549 wr32(E1000_PCIEMISC, reg); 9550 } /* endif adapter->dmac is not disabled */ 9551 } else if (hw->mac.type == e1000_82580) { 9552 u32 reg = rd32(E1000_PCIEMISC); 9553 9554 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION); 9555 wr32(E1000_DMACR, 0); 9556 } 9557 } 9558 9559 /** 9560 * igb_read_i2c_byte - Reads 8 bit word over I2C 9561 * @hw: pointer to hardware structure 9562 * @byte_offset: byte offset to read 9563 * @dev_addr: device address 9564 * @data: value read 9565 * 9566 * Performs byte read operation over I2C interface at 9567 * a specified device address. 9568 **/ 9569 s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, 9570 u8 dev_addr, u8 *data) 9571 { 9572 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); 9573 struct i2c_client *this_client = adapter->i2c_client; 9574 s32 status; 9575 u16 swfw_mask = 0; 9576 9577 if (!this_client) 9578 return E1000_ERR_I2C; 9579 9580 swfw_mask = E1000_SWFW_PHY0_SM; 9581 9582 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) 9583 return E1000_ERR_SWFW_SYNC; 9584 9585 status = i2c_smbus_read_byte_data(this_client, byte_offset); 9586 hw->mac.ops.release_swfw_sync(hw, swfw_mask); 9587 9588 if (status < 0) 9589 return E1000_ERR_I2C; 9590 else { 9591 *data = status; 9592 return 0; 9593 } 9594 } 9595 9596 /** 9597 * igb_write_i2c_byte - Writes 8 bit word over I2C 9598 * @hw: pointer to hardware structure 9599 * @byte_offset: byte offset to write 9600 * @dev_addr: device address 9601 * @data: value to write 9602 * 9603 * Performs byte write operation over I2C interface at 9604 * a specified device address. 9605 **/ 9606 s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, 9607 u8 dev_addr, u8 data) 9608 { 9609 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); 9610 struct i2c_client *this_client = adapter->i2c_client; 9611 s32 status; 9612 u16 swfw_mask = E1000_SWFW_PHY0_SM; 9613 9614 if (!this_client) 9615 return E1000_ERR_I2C; 9616 9617 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) 9618 return E1000_ERR_SWFW_SYNC; 9619 status = i2c_smbus_write_byte_data(this_client, byte_offset, data); 9620 hw->mac.ops.release_swfw_sync(hw, swfw_mask); 9621 9622 if (status) 9623 return E1000_ERR_I2C; 9624 else 9625 return 0; 9626 9627 } 9628 9629 int igb_reinit_queues(struct igb_adapter *adapter) 9630 { 9631 struct net_device *netdev = adapter->netdev; 9632 struct pci_dev *pdev = adapter->pdev; 9633 int err = 0; 9634 9635 if (netif_running(netdev)) 9636 igb_close(netdev); 9637 9638 igb_reset_interrupt_capability(adapter); 9639 9640 if (igb_init_interrupt_scheme(adapter, true)) { 9641 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 9642 return -ENOMEM; 9643 } 9644 9645 if (netif_running(netdev)) 9646 err = igb_open(netdev); 9647 9648 return err; 9649 } 9650 9651 static void igb_nfc_filter_exit(struct igb_adapter *adapter) 9652 { 9653 struct igb_nfc_filter *rule; 9654 9655 spin_lock(&adapter->nfc_lock); 9656 9657 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) 9658 igb_erase_filter(adapter, rule); 9659 9660 hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node) 9661 igb_erase_filter(adapter, rule); 9662 9663 spin_unlock(&adapter->nfc_lock); 9664 } 9665 9666 static void igb_nfc_filter_restore(struct igb_adapter *adapter) 9667 { 9668 struct igb_nfc_filter *rule; 9669 9670 spin_lock(&adapter->nfc_lock); 9671 9672 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) 9673 igb_add_filter(adapter, rule); 9674 9675 spin_unlock(&adapter->nfc_lock); 9676 } 9677 /* igb_main.c */ 9678