/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#ifndef EFX_NIC_H
#define EFX_NIC_H

#include <linux/net_tstamp.h>
#include <linux/i2c-algo-bit.h>
#include "net_driver.h"
#include "efx.h"
#include "mcdi.h"

enum {
	EFX_REV_FALCON_A0 = 0,
	EFX_REV_FALCON_A1 = 1,
	EFX_REV_FALCON_B0 = 2,
	EFX_REV_SIENA_A0 = 3,
	EFX_REV_HUNT_A0 = 4,
};

static inline int efx_nic_rev(struct efx_nic *efx)
{
	return efx->type->revision;
}

extern u32 efx_farch_fpga_ver(struct efx_nic *efx);

/* NIC has two interlinked PCI functions for the same port. */
static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
{
	return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.buf.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		  EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
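/* Illustrative sketch only (hypothetical helper, not part of the driver API):
 * how a caller might walk the event ring with efx_event() and
 * efx_event_present().  Wrap-around is handled inside efx_event() via
 * channel->eventq_mask.
 */
static inline unsigned int
example_count_pending_events(struct efx_channel *channel,
			     unsigned int read_ptr)
{
	unsigned int count = 0;

	/* Stop at the first slot still holding the all-ones "cleared" value */
	while (efx_event_present(efx_event(channel, read_ptr + count)))
		++count;

	return count;
}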
/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
}

/* Decide whether to push a TX descriptor to the NIC vs merely writing
 * the doorbell.  This can reduce latency when we are adding a single
 * descriptor to an empty queue, but is otherwise pointless.  Further,
 * Falcon and Siena have hardware bugs (SF bug 33851) that may be
 * triggered if we don't check this.
 */
static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
					    unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
		&& tx_queue->write_count - write_count == 1;
}
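/* Illustrative sketch only (hypothetical caller; the real descriptor-write
 * paths live in the per-architecture tx_write implementations): capture the
 * old write count, fill in descriptors, then use efx_nic_may_push_tx_desc()
 * to choose between an inline "push" of the single new descriptor and a
 * plain doorbell write.
 */
static inline void example_tx_write(struct efx_tx_queue *tx_queue)
{
	unsigned int old_write_count = tx_queue->write_count;

	/* ... fill in descriptors and advance tx_queue->write_count ... */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		/* Queue was empty and exactly one descriptor was added:
		 * push it together with the doorbell to cut latency. */
	} else {
		/* Otherwise just tell the NIC about the new write pointer. */
	}
}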
/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
}

enum {
	PHY_TYPE_NONE = 0,
	PHY_TYPE_TXC43128 = 1,
	PHY_TYPE_88E1111 = 2,
	PHY_TYPE_SFX7101 = 3,
	PHY_TYPE_QT2022C2 = 4,
	PHY_TYPE_PM8358 = 6,
	PHY_TYPE_SFT9001A = 8,
	PHY_TYPE_QT2025C = 9,
	PHY_TYPE_SFT9001B = 10,
};

#define FALCON_XMAC_LOOPBACKS			\
	((1 << LOOPBACK_XGMII) |		\
	 (1 << LOOPBACK_XGXS) |			\
	 (1 << LOOPBACK_XAUI))

/* Alignment of PCIe DMA boundaries (4KB) */
#define EFX_PAGE_SIZE	4096
/* Size and alignment of buffer table entries (same) */
#define EFX_BUF_SIZE	EFX_PAGE_SIZE

/**
 * struct falcon_board_type - board operations and type information
 * @id: Board type id, as found in NVRAM
 * @init: Allocate resources and initialise peripheral hardware
 * @init_phy: Do board-specific PHY initialisation
 * @fini: Shut down hardware and free resources
 * @set_id_led: Set state of identifying LED or revert to automatic function
 * @monitor: Board-specific health check function
 */
struct falcon_board_type {
	u8 id;
	int (*init) (struct efx_nic *nic);
	void (*init_phy) (struct efx_nic *efx);
	void (*fini) (struct efx_nic *nic);
	void (*set_id_led) (struct efx_nic *efx, enum efx_led_mode mode);
	int (*monitor) (struct efx_nic *nic);
};

/**
 * struct falcon_board - board information
 * @type: Type of board
 * @major: Major rev. ('A', 'B' ...)
 * @minor: Minor rev. (0, 1, ...)
 * @i2c_adap: I2C adapter for on-board peripherals
 * @i2c_data: Data for bit-banging algorithm
 * @hwmon_client: I2C client for hardware monitor
 * @ioexp_client: I2C client for power/port control
 */
struct falcon_board {
	const struct falcon_board_type *type;
	int major;
	int minor;
	struct i2c_adapter i2c_adap;
	struct i2c_algo_bit_data i2c_data;
	struct i2c_client *hwmon_client, *ioexp_client;
};

/**
 * struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
 * @device_id: Controller's id for the device
 * @size: Size (in bytes)
 * @addr_len: Number of address bytes in read/write commands
 * @munge_address: Flag whether addresses should be munged.
 *	Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
 *	use bit 3 of the command byte as address bit A8, rather
 *	than having a two-byte address.  If this flag is set, then
 *	commands should be munged in this way.
 * @erase_command: Erase command (or 0 if sector erase not needed).
 * @erase_size: Erase sector size (in bytes)
 *	Erase commands affect sectors with this size and alignment.
 *	This must be a power of two.
 * @block_size: Write block size (in bytes).
 *	Write commands are limited to blocks with this size and alignment.
 */
struct falcon_spi_device {
	int device_id;
	unsigned int size;
	unsigned int addr_len;
	unsigned int munge_address:1;
	u8 erase_command;
	unsigned int erase_size;
	unsigned int block_size;
};

static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
{
	return spi->size != 0;
}
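/* Illustrative sketch only (hypothetical helper; the driver's real SPI
 * command handling lives elsewhere): how the @munge_address rule documented
 * above can be applied -- for a device with a 9-bit address, address bit A8
 * is folded into bit 3 of the command byte instead of sending a second
 * address byte.
 */
static inline u8 example_spi_munge_command(const struct falcon_spi_device *spi,
					   u8 command, unsigned int address)
{
	if (spi->munge_address)
		return command | (((address >> 8) & 1) << 3);
	return command;
}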
enum {
	FALCON_STAT_tx_bytes,
	FALCON_STAT_tx_packets,
	FALCON_STAT_tx_pause,
	FALCON_STAT_tx_control,
	FALCON_STAT_tx_unicast,
	FALCON_STAT_tx_multicast,
	FALCON_STAT_tx_broadcast,
	FALCON_STAT_tx_lt64,
	FALCON_STAT_tx_64,
	FALCON_STAT_tx_65_to_127,
	FALCON_STAT_tx_128_to_255,
	FALCON_STAT_tx_256_to_511,
	FALCON_STAT_tx_512_to_1023,
	FALCON_STAT_tx_1024_to_15xx,
	FALCON_STAT_tx_15xx_to_jumbo,
	FALCON_STAT_tx_gtjumbo,
	FALCON_STAT_tx_non_tcpudp,
	FALCON_STAT_tx_mac_src_error,
	FALCON_STAT_tx_ip_src_error,
	FALCON_STAT_rx_bytes,
	FALCON_STAT_rx_good_bytes,
	FALCON_STAT_rx_bad_bytes,
	FALCON_STAT_rx_packets,
	FALCON_STAT_rx_good,
	FALCON_STAT_rx_bad,
	FALCON_STAT_rx_pause,
	FALCON_STAT_rx_control,
	FALCON_STAT_rx_unicast,
	FALCON_STAT_rx_multicast,
	FALCON_STAT_rx_broadcast,
	FALCON_STAT_rx_lt64,
	FALCON_STAT_rx_64,
	FALCON_STAT_rx_65_to_127,
	FALCON_STAT_rx_128_to_255,
	FALCON_STAT_rx_256_to_511,
	FALCON_STAT_rx_512_to_1023,
	FALCON_STAT_rx_1024_to_15xx,
	FALCON_STAT_rx_15xx_to_jumbo,
	FALCON_STAT_rx_gtjumbo,
	FALCON_STAT_rx_bad_lt64,
	FALCON_STAT_rx_bad_gtjumbo,
	FALCON_STAT_rx_overflow,
	FALCON_STAT_rx_symbol_error,
	FALCON_STAT_rx_align_error,
	FALCON_STAT_rx_length_error,
	FALCON_STAT_rx_internal_error,
	FALCON_STAT_rx_nodesc_drop_cnt,
	FALCON_STAT_COUNT
};

/**
 * struct falcon_nic_data - Falcon NIC state
 * @pci_dev2: Secondary function of Falcon A
 * @board: Board state and functions
 * @stats: Hardware statistics
 * @stats_disable_count: Nest count for disabling statistics fetches
 * @stats_pending: Is there a pending DMA of MAC statistics.
 * @stats_timer: A timer for regularly fetching MAC statistics.
 * @spi_flash: SPI flash device
 * @spi_eeprom: SPI EEPROM device
 * @spi_lock: SPI bus lock
 * @mdio_lock: MDIO bus lock
 * @xmac_poll_required: XMAC link state needs polling
 */
struct falcon_nic_data {
	struct pci_dev *pci_dev2;
	struct falcon_board board;
	u64 stats[FALCON_STAT_COUNT];
	unsigned int stats_disable_count;
	bool stats_pending;
	struct timer_list stats_timer;
	struct falcon_spi_device spi_flash;
	struct falcon_spi_device spi_eeprom;
	struct mutex spi_lock;
	struct mutex mdio_lock;
	bool xmac_poll_required;
};

static inline struct falcon_board *falcon_board(struct efx_nic *efx)
{
	struct falcon_nic_data *data = efx->nic_data;
	return &data->board;
}

enum {
	SIENA_STAT_tx_bytes,
	SIENA_STAT_tx_good_bytes,
	SIENA_STAT_tx_bad_bytes,
	SIENA_STAT_tx_packets,
	SIENA_STAT_tx_bad,
	SIENA_STAT_tx_pause,
	SIENA_STAT_tx_control,
	SIENA_STAT_tx_unicast,
	SIENA_STAT_tx_multicast,
	SIENA_STAT_tx_broadcast,
	SIENA_STAT_tx_lt64,
	SIENA_STAT_tx_64,
	SIENA_STAT_tx_65_to_127,
	SIENA_STAT_tx_128_to_255,
	SIENA_STAT_tx_256_to_511,
	SIENA_STAT_tx_512_to_1023,
	SIENA_STAT_tx_1024_to_15xx,
	SIENA_STAT_tx_15xx_to_jumbo,
	SIENA_STAT_tx_gtjumbo,
	SIENA_STAT_tx_collision,
	SIENA_STAT_tx_single_collision,
	SIENA_STAT_tx_multiple_collision,
	SIENA_STAT_tx_excessive_collision,
	SIENA_STAT_tx_deferred,
	SIENA_STAT_tx_late_collision,
	SIENA_STAT_tx_excessive_deferred,
	SIENA_STAT_tx_non_tcpudp,
	SIENA_STAT_tx_mac_src_error,
	SIENA_STAT_tx_ip_src_error,
	SIENA_STAT_rx_bytes,
	SIENA_STAT_rx_good_bytes,
	SIENA_STAT_rx_bad_bytes,
	SIENA_STAT_rx_packets,
	SIENA_STAT_rx_good,
	SIENA_STAT_rx_bad,
	SIENA_STAT_rx_pause,
	SIENA_STAT_rx_control,
	SIENA_STAT_rx_unicast,
	SIENA_STAT_rx_multicast,
	SIENA_STAT_rx_broadcast,
	SIENA_STAT_rx_lt64,
	SIENA_STAT_rx_64,
	SIENA_STAT_rx_65_to_127,
	SIENA_STAT_rx_128_to_255,
	SIENA_STAT_rx_256_to_511,
	SIENA_STAT_rx_512_to_1023,
	SIENA_STAT_rx_1024_to_15xx,
	SIENA_STAT_rx_15xx_to_jumbo,
	SIENA_STAT_rx_gtjumbo,
	SIENA_STAT_rx_bad_gtjumbo,
	SIENA_STAT_rx_overflow,
	SIENA_STAT_rx_false_carrier,
	SIENA_STAT_rx_symbol_error,
	SIENA_STAT_rx_align_error,
	SIENA_STAT_rx_length_error,
	SIENA_STAT_rx_internal_error,
	SIENA_STAT_rx_nodesc_drop_cnt,
	SIENA_STAT_COUNT
};

/**
 * struct siena_nic_data - Siena NIC state
 * @wol_filter_id: Wake-on-LAN packet filter id
 * @stats: Hardware statistics
 */
struct siena_nic_data {
	int wol_filter_id;
	u64 stats[SIENA_STAT_COUNT];
};

enum {
	EF10_STAT_tx_bytes,
	EF10_STAT_tx_packets,
	EF10_STAT_tx_pause,
	EF10_STAT_tx_control,
	EF10_STAT_tx_unicast,
	EF10_STAT_tx_multicast,
	EF10_STAT_tx_broadcast,
	EF10_STAT_tx_lt64,
	EF10_STAT_tx_64,
	EF10_STAT_tx_65_to_127,
	EF10_STAT_tx_128_to_255,
	EF10_STAT_tx_256_to_511,
	EF10_STAT_tx_512_to_1023,
	EF10_STAT_tx_1024_to_15xx,
	EF10_STAT_tx_15xx_to_jumbo,
	EF10_STAT_rx_bytes,
	EF10_STAT_rx_bytes_minus_good_bytes,
	EF10_STAT_rx_good_bytes,
	EF10_STAT_rx_bad_bytes,
	EF10_STAT_rx_packets,
	EF10_STAT_rx_good,
	EF10_STAT_rx_bad,
	EF10_STAT_rx_pause,
	EF10_STAT_rx_control,
	EF10_STAT_rx_unicast,
	EF10_STAT_rx_multicast,
	EF10_STAT_rx_broadcast,
	EF10_STAT_rx_lt64,
	EF10_STAT_rx_64,
	EF10_STAT_rx_65_to_127,
	EF10_STAT_rx_128_to_255,
	EF10_STAT_rx_256_to_511,
	EF10_STAT_rx_512_to_1023,
	EF10_STAT_rx_1024_to_15xx,
	EF10_STAT_rx_15xx_to_jumbo,
	EF10_STAT_rx_gtjumbo,
	EF10_STAT_rx_bad_gtjumbo,
	EF10_STAT_rx_overflow,
	EF10_STAT_rx_align_error,
	EF10_STAT_rx_length_error,
	EF10_STAT_rx_nodesc_drops,
	EF10_STAT_rx_pm_trunc_bb_overflow,
	EF10_STAT_rx_pm_discard_bb_overflow,
	EF10_STAT_rx_pm_trunc_vfifo_full,
	EF10_STAT_rx_pm_discard_vfifo_full,
	EF10_STAT_rx_pm_trunc_qbb,
	EF10_STAT_rx_pm_discard_qbb,
	EF10_STAT_rx_pm_discard_mapping,
	EF10_STAT_rx_dp_q_disabled_packets,
	EF10_STAT_rx_dp_di_dropped_packets,
	EF10_STAT_rx_dp_streaming_packets,
	EF10_STAT_rx_dp_emerg_fetch,
	EF10_STAT_rx_dp_emerg_wait,
	EF10_STAT_COUNT
};

/**
 * struct efx_ef10_nic_data - EF10 architecture NIC state
 * @mcdi_buf: DMA buffer for MCDI
 * @warm_boot_count: Last seen MC warm boot count
 * @vi_base: Absolute index of first VI in this function
 * @n_allocated_vis: Number of VIs allocated to this function
 * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
 * @must_restore_filters: Flag: filters have yet to be restored after MC reboot
 * @rx_rss_context: Firmware handle for our RSS context
 * @stats: Hardware statistics
 * @workaround_35388: Flag: firmware supports workaround for bug 35388
 * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
 *	after MC reboot
 * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
 *	%MC_CMD_GET_CAPABILITIES response)
 */
struct efx_ef10_nic_data {
	struct efx_buffer mcdi_buf;
	u16 warm_boot_count;
	unsigned int vi_base;
	unsigned int n_allocated_vis;
	bool must_realloc_vis;
	bool must_restore_filters;
	u32 rx_rss_context;
	u64 stats[EF10_STAT_COUNT];
	bool workaround_35388;
	bool must_check_datapath_caps;
	u32 datapath_caps;
};

/*
 * On the SFC9000 family each port is associated with 1 PCI physical
 * function (PF) handled by sfc and a configurable number of virtual
 * functions (VFs) that may be handled by some other driver, often in
 * a VM guest.  The queue pointer registers are mapped in both PF and
 * VF BARs such that an 8K region provides access to a single RX, TX
 * and event queue (collectively a Virtual Interface, VI or VNIC).
 *
 * The PF has access to all 1024 VIs while VFs are mapped to VIs
 * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
 * in range [VI_BASE + (i << VI_SCALE), VI_BASE + ((i + 1) << VI_SCALE)).
 * The number of VIs and the VI_SCALE value are configurable but must
 * be established at boot time by firmware.
 */
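/* Illustrative sketch only (hypothetical helper, not part of the driver):
 * the VI range visible to VF @vf_i under the VI_BASE/VI_SCALE mapping
 * described above.  VF i sees VIs numbered
 * [vi_base + (i << vi_scale), vi_base + ((i + 1) << vi_scale)).
 */
static inline unsigned int example_vf_first_vi(unsigned int vi_base,
					       unsigned int vi_scale,
					       unsigned int vf_i)
{
	/* Each VF gets a contiguous block of (1 << vi_scale) VIs */
	return vi_base + (vf_i << vi_scale);
}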
/* Maximum VI_SCALE parameter supported by Siena */
#define EFX_VI_SCALE_MAX 6
/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
 * so this is the smallest allowed value. */
#define EFX_VI_BASE 128U
/* Maximum number of VFs allowed */
#define EFX_VF_COUNT_MAX 127
/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
#define EFX_MAX_VF_EVQ_SIZE 8192UL
/* The number of buffer table entries reserved for each VI on a VF */
#define EFX_VF_BUFTBL_PER_VI					\
	((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) *	\
	 sizeof(efx_qword_t) / EFX_BUF_SIZE)

#ifdef CONFIG_SFC_SRIOV

static inline bool efx_sriov_wanted(struct efx_nic *efx)
{
	return efx->vf_count != 0;
}
static inline bool efx_sriov_enabled(struct efx_nic *efx)
{
	return efx->vf_init_count != 0;
}
static inline unsigned int efx_vf_size(struct efx_nic *efx)
{
	return 1 << efx->vi_scale;
}

extern int efx_init_sriov(void);
extern void efx_sriov_probe(struct efx_nic *efx);
extern int efx_sriov_init(struct efx_nic *efx);
extern void efx_sriov_mac_address_changed(struct efx_nic *efx);
extern void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
extern void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
extern void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
extern void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
extern void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
extern void efx_sriov_reset(struct efx_nic *efx);
extern void efx_sriov_fini(struct efx_nic *efx);
extern void efx_fini_sriov(void);

#else

static inline bool efx_sriov_wanted(struct efx_nic *efx) { return false; }
static inline bool efx_sriov_enabled(struct efx_nic *efx) { return false; }
static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; }

static inline int efx_init_sriov(void) { return 0; }
static inline void efx_sriov_probe(struct efx_nic *efx) {}
static inline int efx_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
static inline void efx_sriov_mac_address_changed(struct efx_nic *efx) {}
static inline void efx_sriov_tx_flush_done(struct efx_nic *efx,
					   efx_qword_t *event) {}
static inline void efx_sriov_rx_flush_done(struct efx_nic *efx,
					   efx_qword_t *event) {}
static inline void efx_sriov_event(struct efx_channel *channel,
				   efx_qword_t *event) {}
static inline void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) {}
static inline void efx_sriov_flr(struct efx_nic *efx, unsigned flr) {}
static inline void efx_sriov_reset(struct efx_nic *efx) {}
static inline void efx_sriov_fini(struct efx_nic *efx) {}
static inline void efx_fini_sriov(void) {}

#endif

extern int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
extern int efx_sriov_set_vf_vlan(struct net_device *dev, int vf,
				 u16 vlan, u8 qos);
extern int efx_sriov_get_vf_config(struct net_device *dev, int vf,
				   struct ifla_vf_info *ivf);
extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
				     bool spoofchk);

struct ethtool_ts_info;
extern void efx_ptp_probe(struct efx_nic *efx);
extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
extern void efx_ptp_get_ts_info(struct efx_nic *efx,
				struct ethtool_ts_info *ts_info);
extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
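/* Illustrative sketch only (hypothetical caller, assuming the PTP transmit
 * path takes ownership of the skb): packets that efx_ptp_is_ptp_tx()
 * recognises are diverted to efx_ptp_tx() instead of the normal TX queues.
 */
static inline bool example_try_ptp_transmit(struct efx_nic *efx,
					    struct sk_buff *skb)
{
	if (!efx_ptp_is_ptp_tx(efx, skb))
		return false;	/* caller transmits on a normal TX queue */
	efx_ptp_tx(efx, skb);	/* hand the skb to the PTP transmit path */
	return true;
}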
extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
extern const struct efx_nic_type siena_a0_nic_type;
extern const struct efx_nic_type efx_hunt_a0_nic_type;

/**************************************************************************
 *
 * Externs
 *
 **************************************************************************
 */

extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);

/* TX data path */
static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	return tx_queue->efx->type->tx_probe(tx_queue);
}
static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_init(tx_queue);
}
static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_remove(tx_queue);
}
static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_write(tx_queue);
}

/* RX data path */
static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	return rx_queue->efx->type->rx_probe(rx_queue);
}
static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_init(rx_queue);
}
static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_remove(rx_queue);
}
static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_write(rx_queue);
}
static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_defer_refill(rx_queue);
}

/* Event data path */
static inline int efx_nic_probe_eventq(struct efx_channel *channel)
{
	return channel->efx->type->ev_probe(channel);
}
static inline int efx_nic_init_eventq(struct efx_channel *channel)
{
	return channel->efx->type->ev_init(channel);
}
static inline void efx_nic_fini_eventq(struct efx_channel *channel)
{
	channel->efx->type->ev_fini(channel);
}
static inline void efx_nic_remove_eventq(struct efx_channel *channel)
{
	channel->efx->type->ev_remove(channel);
}
static inline int
efx_nic_process_eventq(struct efx_channel *channel, int quota)
{
	return channel->efx->type->ev_process(channel, quota);
}
static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	channel->efx->type->ev_read_ack(channel);
}
extern void efx_nic_event_test_start(struct efx_channel *channel);
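/* Illustrative sketch only (hypothetical caller, error handling trimmed):
 * driver code reaches the per-NIC-type method table through the wrappers
 * above rather than dereferencing efx->type directly.
 */
static inline int example_start_tx_queue(struct efx_tx_queue *tx_queue)
{
	int rc;

	rc = efx_nic_probe_tx(tx_queue);	/* allocate the descriptor ring */
	if (rc)
		return rc;
	efx_nic_init_tx(tx_queue);		/* program the ring into the NIC */
	return 0;
}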
/* Falcon/Siena queue operations */
extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
extern int efx_farch_ev_probe(struct efx_channel *channel);
extern int efx_farch_ev_init(struct efx_channel *channel);
extern void efx_farch_ev_fini(struct efx_channel *channel);
extern void efx_farch_ev_remove(struct efx_channel *channel);
extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
extern void efx_farch_ev_read_ack(struct efx_channel *channel);
extern void efx_farch_ev_test_generate(struct efx_channel *channel);

/* Falcon/Siena filter operations */
extern int efx_farch_filter_table_probe(struct efx_nic *efx);
extern void efx_farch_filter_table_restore(struct efx_nic *efx);
extern void efx_farch_filter_table_remove(struct efx_nic *efx);
extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
extern s32 efx_farch_filter_insert(struct efx_nic *efx,
				   struct efx_filter_spec *spec, bool replace);
extern int efx_farch_filter_remove_safe(struct efx_nic *efx,
					enum efx_filter_priority priority,
					u32 filter_id);
extern int efx_farch_filter_get_safe(struct efx_nic *efx,
				     enum efx_filter_priority priority,
				     u32 filter_id, struct efx_filter_spec *);
extern void efx_farch_filter_clear_rx(struct efx_nic *efx,
				      enum efx_filter_priority priority);
extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
					  enum efx_filter_priority priority);
extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
				       enum efx_filter_priority priority,
				       u32 *buf, u32 size);
#ifdef CONFIG_RFS_ACCEL
extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
				       struct efx_filter_spec *spec);
extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
					    unsigned int index);
#endif
extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);

extern bool efx_nic_event_present(struct efx_channel *channel);

/* Some statistics are computed as A - B where A and B each increase
 * linearly with some hardware counter(s) and the counters are read
 * asynchronously.  If the counters contributing to B are always read
 * after those contributing to A, the computed value may be lower than
 * the true value by some variable amount, and may decrease between
 * subsequent computations.
 *
 * We should never allow statistics to decrease or to exceed the true
 * value.  Since the computed value will never be greater than the
 * true value, we can achieve this by only storing the computed value
 * when it increases.
 */
static inline void efx_update_diff_stat(u64 *stat, u64 diff)
{
	if ((s64)(diff - *stat) > 0)
		*stat = diff;
}

/* Interrupts */
extern int efx_nic_init_interrupt(struct efx_nic *efx);
extern void efx_nic_irq_test_start(struct efx_nic *efx);
extern void efx_nic_fini_interrupt(struct efx_nic *efx);

/* Falcon/Siena interrupts */
extern void efx_farch_irq_enable_master(struct efx_nic *efx);
extern void efx_farch_irq_test_generate(struct efx_nic *efx);
extern void efx_farch_irq_disable_master(struct efx_nic *efx);
extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);

static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
	return ACCESS_ONCE(channel->event_test_cpu);
}
static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
{
	return ACCESS_ONCE(efx->last_irq_cpu);
}

/* Global Resources */
extern int efx_nic_flush_queues(struct efx_nic *efx);
extern void siena_prepare_flush(struct efx_nic *efx);
extern int efx_farch_fini_dmaq(struct efx_nic *efx);
extern void siena_finish_flush(struct efx_nic *efx);
extern void falcon_start_nic_stats(struct efx_nic *efx);
extern void falcon_stop_nic_stats(struct efx_nic *efx);
extern int falcon_reset_xaui(struct efx_nic *efx);
extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
extern void efx_farch_init_common(struct efx_nic *efx);
extern void efx_ef10_handle_drain_event(struct efx_nic *efx);
static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
	efx->type->rx_push_indir_table(efx);
}
extern void efx_farch_rx_push_indir_table(struct efx_nic *efx);

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len, gfp_t gfp_flags);
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);

/* Tests */
struct efx_farch_register_test {
	unsigned address;
	efx_oword_t mask;
};
extern int efx_farch_test_registers(struct efx_nic *efx,
				    const struct efx_farch_register_test *regs,
				    size_t n_regs);

extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);

extern size_t
efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
		       const unsigned long *mask, u8 *names);
extern void
efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
		     const unsigned long *mask,
		     u64 *stats, const void *dma_buf, bool accumulate);

#define EFX_MAX_FLUSH_TIME 5000

extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
				     efx_qword_t *event);

#endif /* EFX_NIC_H */