1 /**************************************************************************** 2 * Driver for Solarflare network controllers and boards 3 * Copyright 2005-2006 Fen Systems Ltd. 4 * Copyright 2005-2013 Solarflare Communications Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation, incorporated herein by reference. 9 */ 10 11 /* Common definitions for all Efx net driver code */ 12 13 #ifndef EF4_NET_DRIVER_H 14 #define EF4_NET_DRIVER_H 15 16 #include <linux/netdevice.h> 17 #include <linux/etherdevice.h> 18 #include <linux/ethtool.h> 19 #include <linux/if_vlan.h> 20 #include <linux/timer.h> 21 #include <linux/mdio.h> 22 #include <linux/list.h> 23 #include <linux/pci.h> 24 #include <linux/device.h> 25 #include <linux/highmem.h> 26 #include <linux/workqueue.h> 27 #include <linux/mutex.h> 28 #include <linux/rwsem.h> 29 #include <linux/vmalloc.h> 30 #include <linux/i2c.h> 31 #include <linux/mtd/mtd.h> 32 #include <net/busy_poll.h> 33 34 #include "enum.h" 35 #include "bitfield.h" 36 #include "filter.h" 37 38 /************************************************************************** 39 * 40 * Build definitions 41 * 42 **************************************************************************/ 43 44 #define EF4_DRIVER_VERSION "4.1" 45 46 #ifdef DEBUG 47 #define EF4_BUG_ON_PARANOID(x) BUG_ON(x) 48 #define EF4_WARN_ON_PARANOID(x) WARN_ON(x) 49 #else 50 #define EF4_BUG_ON_PARANOID(x) do {} while (0) 51 #define EF4_WARN_ON_PARANOID(x) do {} while (0) 52 #endif 53 54 /************************************************************************** 55 * 56 * Efx data structures 57 * 58 **************************************************************************/ 59 60 #define EF4_MAX_CHANNELS 32U 61 #define EF4_MAX_RX_QUEUES EF4_MAX_CHANNELS 62 #define EF4_EXTRA_CHANNEL_IOV 0 63 #define EF4_EXTRA_CHANNEL_PTP 1 64 #define EF4_MAX_EXTRA_CHANNELS 2U 65 66 /* Checksum generation is a per-queue option in hardware, so each 67 * queue visible to the networking core is backed by two hardware TX 68 * queues. */ 69 #define EF4_MAX_TX_TC 2 70 #define EF4_MAX_CORE_TX_QUEUES (EF4_MAX_TX_TC * EF4_MAX_CHANNELS) 71 #define EF4_TXQ_TYPE_OFFLOAD 1 /* flag */ 72 #define EF4_TXQ_TYPE_HIGHPRI 2 /* flag */ 73 #define EF4_TXQ_TYPES 4 74 #define EF4_MAX_TX_QUEUES (EF4_TXQ_TYPES * EF4_MAX_CHANNELS) 75 76 /* Maximum possible MTU the driver supports */ 77 #define EF4_MAX_MTU (9 * 1024) 78 79 /* Minimum MTU, from RFC791 (IP) */ 80 #define EF4_MIN_MTU 68 81 82 /* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page, 83 * and should be a multiple of the cache line size. 84 */ 85 #define EF4_RX_USR_BUF_SIZE (2048 - 256) 86 87 /* If possible, we should ensure cache line alignment at start and end 88 * of every buffer. Otherwise, we just need to ensure 4-byte 89 * alignment of the network header. 90 */ 91 #if NET_IP_ALIGN == 0 92 #define EF4_RX_BUF_ALIGNMENT L1_CACHE_BYTES 93 #else 94 #define EF4_RX_BUF_ALIGNMENT 4 95 #endif 96 97 struct ef4_self_tests; 98 99 /** 100 * struct ef4_buffer - A general-purpose DMA buffer 101 * @addr: host base address of the buffer 102 * @dma_addr: DMA base address of the buffer 103 * @len: Buffer length, in bytes 104 * 105 * The NIC uses these buffers for its interrupt status registers and 106 * MAC stats dumps. 
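 *
 * A minimal sketch (not quoted from the driver) of how such a buffer is
 * typically populated with the generic DMA API; "efx" and "len" here stand
 * for the owning NIC and the requested size:
 *
 *	buf->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
 *				       &buf->dma_addr, GFP_KERNEL);
 *	if (buf->addr)
 *		buf->len = len;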
107 */ 108 struct ef4_buffer { 109 void *addr; 110 dma_addr_t dma_addr; 111 unsigned int len; 112 }; 113 114 /** 115 * struct ef4_special_buffer - DMA buffer entered into buffer table 116 * @buf: Standard &struct ef4_buffer 117 * @index: Buffer index within controller's buffer table 118 * @entries: Number of buffer table entries 119 * 120 * The NIC has a buffer table that maps buffers of size %EF4_BUF_SIZE. 121 * Event and descriptor rings are addressed via one or more buffer 122 * table entries (and so can be physically non-contiguous, although we 123 * currently do not take advantage of that). On Falcon and Siena we 124 * have to take care of allocating and initialising the entries 125 * ourselves. On later hardware this is managed by the firmware and 126 * @index and @entries are left as 0. 127 */ 128 struct ef4_special_buffer { 129 struct ef4_buffer buf; 130 unsigned int index; 131 unsigned int entries; 132 }; 133 134 /** 135 * struct ef4_tx_buffer - buffer state for a TX descriptor 136 * @skb: When @flags & %EF4_TX_BUF_SKB, the associated socket buffer to be 137 * freed when descriptor completes 138 * @option: When @flags & %EF4_TX_BUF_OPTION, a NIC-specific option descriptor. 139 * @dma_addr: DMA address of the fragment. 140 * @flags: Flags for allocation and DMA mapping type 141 * @len: Length of this fragment. 142 * This field is zero when the queue slot is empty. 143 * @unmap_len: Length of this fragment to unmap 144 * @dma_offset: Offset of @dma_addr from the address of the backing DMA mapping. 145 * Only valid if @unmap_len != 0. 146 */ 147 struct ef4_tx_buffer { 148 const struct sk_buff *skb; 149 union { 150 ef4_qword_t option; 151 dma_addr_t dma_addr; 152 }; 153 unsigned short flags; 154 unsigned short len; 155 unsigned short unmap_len; 156 unsigned short dma_offset; 157 }; 158 #define EF4_TX_BUF_CONT 1 /* not last descriptor of packet */ 159 #define EF4_TX_BUF_SKB 2 /* buffer is last part of skb */ 160 #define EF4_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */ 161 #define EF4_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */ 162 163 /** 164 * struct ef4_tx_queue - An Efx TX queue 165 * 166 * This is a ring buffer of TX fragments. 167 * Since the TX completion path always executes on the same 168 * CPU and the xmit path can operate on different CPUs, 169 * performance is increased by ensuring that the completion 170 * path and the xmit path operate on different cache lines. 171 * This is particularly important if the xmit path is always 172 * executing on one CPU which is different from the completion 173 * path. There is also a cache line for members which are 174 * read but not written on the fast path. 175 * 176 * @efx: The associated Efx NIC 177 * @queue: DMA queue number 178 * @channel: The associated channel 179 * @core_txq: The networking core TX queue structure 180 * @buffer: The software buffer ring 181 * @cb_page: Array of pages of copy buffers. Carved up according to 182 * %EF4_TX_CB_ORDER into %EF4_TX_CB_SIZE-sized chunks. 183 * @txd: The hardware descriptor ring 184 * @ptr_mask: The size of the ring minus 1. 185 * @initialised: Has hardware queue been initialised? 186 * @tx_min_size: Minimum transmit size for this queue. Depends on HW. 187 * @read_count: Current read pointer. 188 * This is the number of buffers that have been removed from both rings. 189 * @old_write_count: The value of @write_count when last checked. 190 * This is here for performance reasons.
The xmit path will 191 * only get the up-to-date value of @write_count if this 192 * variable indicates that the queue is empty. This is to 193 * avoid cache-line ping-pong between the xmit path and the 194 * completion path. 195 * @merge_events: Number of TX merged completion events 196 * @insert_count: Current insert pointer 197 * This is the number of buffers that have been added to the 198 * software ring. 199 * @write_count: Current write pointer 200 * This is the number of buffers that have been added to the 201 * hardware ring. 202 * @old_read_count: The value of read_count when last checked. 203 * This is here for performance reasons. The xmit path will 204 * only get the up-to-date value of read_count if this 205 * variable indicates that the queue is full. This is to 206 * avoid cache-line ping-pong between the xmit path and the 207 * completion path. 208 * @pushes: Number of times the TX push feature has been used 209 * @xmit_more_available: Are any packets waiting to be pushed to the NIC 210 * @cb_packets: Number of times the TX copybreak feature has been used 211 * @empty_read_count: If the completion path has seen the queue as empty 212 * and the transmission path has not yet checked this, the value of 213 * @read_count bitwise-added to %EF4_EMPTY_COUNT_VALID; otherwise 0. 214 */ 215 struct ef4_tx_queue { 216 /* Members which don't change on the fast path */ 217 struct ef4_nic *efx ____cacheline_aligned_in_smp; 218 unsigned queue; 219 struct ef4_channel *channel; 220 struct netdev_queue *core_txq; 221 struct ef4_tx_buffer *buffer; 222 struct ef4_buffer *cb_page; 223 struct ef4_special_buffer txd; 224 unsigned int ptr_mask; 225 bool initialised; 226 unsigned int tx_min_size; 227 228 /* Function pointers used in the fast path. */ 229 int (*handle_tso)(struct ef4_tx_queue*, struct sk_buff*, bool *); 230 231 /* Members used mainly on the completion path */ 232 unsigned int read_count ____cacheline_aligned_in_smp; 233 unsigned int old_write_count; 234 unsigned int merge_events; 235 unsigned int bytes_compl; 236 unsigned int pkts_compl; 237 238 /* Members used only on the xmit path */ 239 unsigned int insert_count ____cacheline_aligned_in_smp; 240 unsigned int write_count; 241 unsigned int old_read_count; 242 unsigned int pushes; 243 bool xmit_more_available; 244 unsigned int cb_packets; 245 /* Statistics to supplement MAC stats */ 246 unsigned long tx_packets; 247 248 /* Members shared between paths and sometimes updated */ 249 unsigned int empty_read_count ____cacheline_aligned_in_smp; 250 #define EF4_EMPTY_COUNT_VALID 0x80000000 251 atomic_t flush_outstanding; 252 }; 253 254 #define EF4_TX_CB_ORDER 7 255 #define EF4_TX_CB_SIZE (1 << EF4_TX_CB_ORDER) - NET_IP_ALIGN 256 257 /** 258 * struct ef4_rx_buffer - An Efx RX data buffer 259 * @dma_addr: DMA base address of the buffer 260 * @page: The associated page buffer. 261 * Will be %NULL if the buffer slot is currently free. 262 * @page_offset: If pending: offset in @page of DMA base address. 263 * If completed: offset in @page of Ethernet header. 264 * @len: If pending: length for DMA descriptor. 265 * If completed: received length, excluding hash prefix. 266 * @flags: Flags for buffer and packet state. These are only set on the 267 * first buffer of a scattered packet. 
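 *
 * A minimal sketch of how a completion path might consume @flags; the skb
 * handling and the "discard" label are illustrative only:
 *
 *	if (rx_buf->flags & EF4_RX_PKT_CSUMMED)
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	if (rx_buf->flags & EF4_RX_PKT_DISCARD)
 *		goto discard;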
268 */ 269 struct ef4_rx_buffer { 270 dma_addr_t dma_addr; 271 struct page *page; 272 u16 page_offset; 273 u16 len; 274 u16 flags; 275 }; 276 #define EF4_RX_BUF_LAST_IN_PAGE 0x0001 277 #define EF4_RX_PKT_CSUMMED 0x0002 278 #define EF4_RX_PKT_DISCARD 0x0004 279 #define EF4_RX_PKT_TCP 0x0040 280 #define EF4_RX_PKT_PREFIX_LEN 0x0080 /* length is in prefix only */ 281 282 /** 283 * struct ef4_rx_page_state - Page-based rx buffer state 284 * 285 * Inserted at the start of every page allocated for receive buffers. 286 * Used to facilitate sharing dma mappings between recycled rx buffers 287 * and those passed up to the kernel. 288 * 289 * @dma_addr: The dma address of this page. 290 */ 291 struct ef4_rx_page_state { 292 dma_addr_t dma_addr; 293 294 unsigned int __pad[0] ____cacheline_aligned; 295 }; 296 297 /** 298 * struct ef4_rx_queue - An Efx RX queue 299 * @efx: The associated Efx NIC 300 * @core_index: Index of network core RX queue. Will be >= 0 iff this 301 * is associated with a real RX queue. 302 * @buffer: The software buffer ring 303 * @rxd: The hardware descriptor ring 304 * @ptr_mask: The size of the ring minus 1. 305 * @refill_enabled: Enable refill whenever fill level is low 306 * @flush_pending: Set when a RX flush is pending. Has the same lifetime as 307 * @rxq_flush_pending. 308 * @added_count: Number of buffers added to the receive queue. 309 * @notified_count: Number of buffers given to NIC (<= @added_count). 310 * @removed_count: Number of buffers removed from the receive queue. 311 * @scatter_n: Used by NIC specific receive code. 312 * @scatter_len: Used by NIC specific receive code. 313 * @page_ring: The ring to store DMA mapped pages for reuse. 314 * @page_add: Counter to calculate the write pointer for the recycle ring. 315 * @page_remove: Counter to calculate the read pointer for the recycle ring. 316 * @page_recycle_count: The number of pages that have been recycled. 317 * @page_recycle_failed: The number of pages that couldn't be recycled because 318 * the kernel still held a reference to them. 319 * @page_recycle_full: The number of pages that were released because the 320 * recycle ring was full. 321 * @page_ptr_mask: The number of pages in the RX recycle ring minus 1. 322 * @max_fill: RX descriptor maximum fill level (<= ring size) 323 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill 324 * (<= @max_fill) 325 * @min_fill: RX descriptor minimum non-zero fill level. 326 * This records the minimum fill level observed when a ring 327 * refill was triggered. 328 * @recycle_count: RX buffer recycle counter. 329 * @slow_fill: Timer used to defer ef4_nic_generate_fill_event(). 
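 *
 * A minimal sketch of how these counters are typically combined; both the
 * fill-level calculation and the recycle-ring lookup are illustrative:
 *
 *	fill_level = rx_queue->added_count - rx_queue->removed_count;
 *	page = rx_queue->page_ring[rx_queue->page_remove &
 *				   rx_queue->page_ptr_mask];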
330 */ 331 struct ef4_rx_queue { 332 struct ef4_nic *efx; 333 int core_index; 334 struct ef4_rx_buffer *buffer; 335 struct ef4_special_buffer rxd; 336 unsigned int ptr_mask; 337 bool refill_enabled; 338 bool flush_pending; 339 340 unsigned int added_count; 341 unsigned int notified_count; 342 unsigned int removed_count; 343 unsigned int scatter_n; 344 unsigned int scatter_len; 345 struct page **page_ring; 346 unsigned int page_add; 347 unsigned int page_remove; 348 unsigned int page_recycle_count; 349 unsigned int page_recycle_failed; 350 unsigned int page_recycle_full; 351 unsigned int page_ptr_mask; 352 unsigned int max_fill; 353 unsigned int fast_fill_trigger; 354 unsigned int min_fill; 355 unsigned int min_overfill; 356 unsigned int recycle_count; 357 struct timer_list slow_fill; 358 unsigned int slow_fill_count; 359 /* Statistics to supplement MAC stats */ 360 unsigned long rx_packets; 361 }; 362 363 /** 364 * struct ef4_channel - An Efx channel 365 * 366 * A channel comprises an event queue, at least one TX queue, at least 367 * one RX queue, and an associated tasklet for processing the event 368 * queue. 369 * 370 * @efx: Associated Efx NIC 371 * @channel: Channel instance number 372 * @type: Channel type definition 373 * @eventq_init: Event queue initialised flag 374 * @enabled: Channel enabled indicator 375 * @irq: IRQ number (MSI and MSI-X only) 376 * @irq_moderation_us: IRQ moderation value (in microseconds) 377 * @napi_dev: Net device used with NAPI 378 * @napi_str: NAPI control structure 379 * @state: state for NAPI vs busy polling 380 * @state_lock: lock protecting @state 381 * @eventq: Event queue buffer 382 * @eventq_mask: Event queue pointer mask 383 * @eventq_read_ptr: Event queue read pointer 384 * @event_test_cpu: Last CPU to handle interrupt or test event for this channel 385 * @irq_count: Number of IRQs since last adaptive moderation decision 386 * @irq_mod_score: IRQ moderation score 387 * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, 388 * indexed by filter ID 389 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors 390 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors 391 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors 392 * @n_rx_mcast_mismatch: Count of unmatched multicast frames 393 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors 394 * @n_rx_overlength: Count of RX_OVERLENGTH errors 395 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun 396 * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to 397 * lack of descriptors 398 * @n_rx_merge_events: Number of RX merged completion events 399 * @n_rx_merge_packets: Number of RX packets completed by merged events 400 * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by 401 * __ef4_rx_packet(), or zero if there is none 402 * @rx_pkt_index: Ring index of first buffer for next packet to be delivered 403 * by __ef4_rx_packet(), if @rx_pkt_n_frags != 0 404 * @rx_queue: RX queue for this channel 405 * @tx_queue: TX queues for this channel 406 */ 407 struct ef4_channel { 408 struct ef4_nic *efx; 409 int channel; 410 const struct ef4_channel_type *type; 411 bool eventq_init; 412 bool enabled; 413 int irq; 414 unsigned int irq_moderation_us; 415 struct net_device *napi_dev; 416 struct napi_struct napi_str; 417 #ifdef CONFIG_NET_RX_BUSY_POLL 418 unsigned long busy_poll_state; 419 #endif 420 struct ef4_special_buffer eventq; 421 unsigned int eventq_mask; 422 unsigned int eventq_read_ptr; 423 int event_test_cpu; 424 425 unsigned 
int irq_count; 426 unsigned int irq_mod_score; 427 #ifdef CONFIG_RFS_ACCEL 428 unsigned int rfs_filters_added; 429 #define RPS_FLOW_ID_INVALID 0xFFFFFFFF 430 u32 *rps_flow_id; 431 #endif 432 433 unsigned n_rx_tobe_disc; 434 unsigned n_rx_ip_hdr_chksum_err; 435 unsigned n_rx_tcp_udp_chksum_err; 436 unsigned n_rx_mcast_mismatch; 437 unsigned n_rx_frm_trunc; 438 unsigned n_rx_overlength; 439 unsigned n_skbuff_leaks; 440 unsigned int n_rx_nodesc_trunc; 441 unsigned int n_rx_merge_events; 442 unsigned int n_rx_merge_packets; 443 444 unsigned int rx_pkt_n_frags; 445 unsigned int rx_pkt_index; 446 447 struct ef4_rx_queue rx_queue; 448 struct ef4_tx_queue tx_queue[EF4_TXQ_TYPES]; 449 }; 450 451 #ifdef CONFIG_NET_RX_BUSY_POLL 452 enum ef4_channel_busy_poll_state { 453 EF4_CHANNEL_STATE_IDLE = 0, 454 EF4_CHANNEL_STATE_NAPI = BIT(0), 455 EF4_CHANNEL_STATE_NAPI_REQ_BIT = 1, 456 EF4_CHANNEL_STATE_NAPI_REQ = BIT(1), 457 EF4_CHANNEL_STATE_POLL_BIT = 2, 458 EF4_CHANNEL_STATE_POLL = BIT(2), 459 EF4_CHANNEL_STATE_DISABLE_BIT = 3, 460 }; 461 462 static inline void ef4_channel_busy_poll_init(struct ef4_channel *channel) 463 { 464 WRITE_ONCE(channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE); 465 } 466 467 /* Called from the device poll routine to get ownership of a channel. */ 468 static inline bool ef4_channel_lock_napi(struct ef4_channel *channel) 469 { 470 unsigned long prev, old = READ_ONCE(channel->busy_poll_state); 471 472 while (1) { 473 switch (old) { 474 case EF4_CHANNEL_STATE_POLL: 475 /* Ensure ef4_channel_try_lock_poll() wont starve us */ 476 set_bit(EF4_CHANNEL_STATE_NAPI_REQ_BIT, 477 &channel->busy_poll_state); 478 /* fallthrough */ 479 case EF4_CHANNEL_STATE_POLL | EF4_CHANNEL_STATE_NAPI_REQ: 480 return false; 481 default: 482 break; 483 } 484 prev = cmpxchg(&channel->busy_poll_state, old, 485 EF4_CHANNEL_STATE_NAPI); 486 if (unlikely(prev != old)) { 487 /* This is likely to mean we've just entered polling 488 * state. Go back round to set the REQ bit. 489 */ 490 old = prev; 491 continue; 492 } 493 return true; 494 } 495 } 496 497 static inline void ef4_channel_unlock_napi(struct ef4_channel *channel) 498 { 499 /* Make sure write has completed from ef4_channel_lock_napi() */ 500 smp_wmb(); 501 WRITE_ONCE(channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE); 502 } 503 504 /* Called from ef4_busy_poll(). */ 505 static inline bool ef4_channel_try_lock_poll(struct ef4_channel *channel) 506 { 507 return cmpxchg(&channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE, 508 EF4_CHANNEL_STATE_POLL) == EF4_CHANNEL_STATE_IDLE; 509 } 510 511 static inline void ef4_channel_unlock_poll(struct ef4_channel *channel) 512 { 513 clear_bit_unlock(EF4_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state); 514 } 515 516 static inline bool ef4_channel_busy_polling(struct ef4_channel *channel) 517 { 518 return test_bit(EF4_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state); 519 } 520 521 static inline void ef4_channel_enable(struct ef4_channel *channel) 522 { 523 clear_bit_unlock(EF4_CHANNEL_STATE_DISABLE_BIT, 524 &channel->busy_poll_state); 525 } 526 527 /* Stop further polling or napi access. 528 * Returns false if the channel is currently busy polling. 
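 *
 * A minimal usage sketch; the retry loop and sleep range are illustrative,
 * not a quotation of the driver:
 *
 *	while (!ef4_channel_disable(channel))
 *		usleep_range(1000, 20000);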
529 */ 530 static inline bool ef4_channel_disable(struct ef4_channel *channel) 531 { 532 set_bit(EF4_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state); 533 /* Implicit barrier in ef4_channel_busy_polling() */ 534 return !ef4_channel_busy_polling(channel); 535 } 536 537 #else /* CONFIG_NET_RX_BUSY_POLL */ 538 539 static inline void ef4_channel_busy_poll_init(struct ef4_channel *channel) 540 { 541 } 542 543 static inline bool ef4_channel_lock_napi(struct ef4_channel *channel) 544 { 545 return true; 546 } 547 548 static inline void ef4_channel_unlock_napi(struct ef4_channel *channel) 549 { 550 } 551 552 static inline bool ef4_channel_try_lock_poll(struct ef4_channel *channel) 553 { 554 return false; 555 } 556 557 static inline void ef4_channel_unlock_poll(struct ef4_channel *channel) 558 { 559 } 560 561 static inline bool ef4_channel_busy_polling(struct ef4_channel *channel) 562 { 563 return false; 564 } 565 566 static inline void ef4_channel_enable(struct ef4_channel *channel) 567 { 568 } 569 570 static inline bool ef4_channel_disable(struct ef4_channel *channel) 571 { 572 return true; 573 } 574 #endif /* CONFIG_NET_RX_BUSY_POLL */ 575 576 /** 577 * struct ef4_msi_context - Context for each MSI 578 * @efx: The associated NIC 579 * @index: Index of the channel/IRQ 580 * @name: Name of the channel/IRQ 581 * 582 * Unlike &struct ef4_channel, this is never reallocated and is always 583 * safe for the IRQ handler to access. 584 */ 585 struct ef4_msi_context { 586 struct ef4_nic *efx; 587 unsigned int index; 588 char name[IFNAMSIZ + 6]; 589 }; 590 591 /** 592 * struct ef4_channel_type - distinguishes traffic and extra channels 593 * @handle_no_channel: Handle failure to allocate an extra channel 594 * @pre_probe: Set up extra state prior to initialisation 595 * @post_remove: Tear down extra state after finalisation, if allocated. 596 * May be called on channels that have not been probed. 597 * @get_name: Generate the channel's name (used for its IRQ handler) 598 * @copy: Copy the channel state prior to reallocation. May be %NULL if 599 * reallocation is not supported. 600 * @receive_skb: Handle an skb ready to be passed to netif_receive_skb() 601 * @keep_eventq: Flag for whether event queue should be kept initialised 602 * while the device is stopped 603 */ 604 struct ef4_channel_type { 605 void (*handle_no_channel)(struct ef4_nic *); 606 int (*pre_probe)(struct ef4_channel *); 607 void (*post_remove)(struct ef4_channel *); 608 void (*get_name)(struct ef4_channel *, char *buf, size_t len); 609 struct ef4_channel *(*copy)(const struct ef4_channel *); 610 bool (*receive_skb)(struct ef4_channel *, struct sk_buff *); 611 bool keep_eventq; 612 }; 613 614 enum ef4_led_mode { 615 EF4_LED_OFF = 0, 616 EF4_LED_ON = 1, 617 EF4_LED_DEFAULT = 2 618 }; 619 620 #define STRING_TABLE_LOOKUP(val, member) \ 621 ((val) < member ## _max) ? 
member ## _names[val] : "(invalid)" 622 623 extern const char *const ef4_loopback_mode_names[]; 624 extern const unsigned int ef4_loopback_mode_max; 625 #define LOOPBACK_MODE(efx) \ 626 STRING_TABLE_LOOKUP((efx)->loopback_mode, ef4_loopback_mode) 627 628 extern const char *const ef4_reset_type_names[]; 629 extern const unsigned int ef4_reset_type_max; 630 #define RESET_TYPE(type) \ 631 STRING_TABLE_LOOKUP(type, ef4_reset_type) 632 633 enum ef4_int_mode { 634 /* Be careful if altering to correct macro below */ 635 EF4_INT_MODE_MSIX = 0, 636 EF4_INT_MODE_MSI = 1, 637 EF4_INT_MODE_LEGACY = 2, 638 EF4_INT_MODE_MAX /* Insert any new items before this */ 639 }; 640 #define EF4_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EF4_INT_MODE_MSI) 641 642 enum nic_state { 643 STATE_UNINIT = 0, /* device being probed/removed or is frozen */ 644 STATE_READY = 1, /* hardware ready and netdev registered */ 645 STATE_DISABLED = 2, /* device disabled due to hardware errors */ 646 STATE_RECOVERY = 3, /* device recovering from PCI error */ 647 }; 648 649 /* Forward declaration */ 650 struct ef4_nic; 651 652 /* Pseudo bit-mask flow control field */ 653 #define EF4_FC_RX FLOW_CTRL_RX 654 #define EF4_FC_TX FLOW_CTRL_TX 655 #define EF4_FC_AUTO 4 656 657 /** 658 * struct ef4_link_state - Current state of the link 659 * @up: Link is up 660 * @fd: Link is full-duplex 661 * @fc: Actual flow control flags 662 * @speed: Link speed (Mbps) 663 */ 664 struct ef4_link_state { 665 bool up; 666 bool fd; 667 u8 fc; 668 unsigned int speed; 669 }; 670 671 static inline bool ef4_link_state_equal(const struct ef4_link_state *left, 672 const struct ef4_link_state *right) 673 { 674 return left->up == right->up && left->fd == right->fd && 675 left->fc == right->fc && left->speed == right->speed; 676 } 677 678 /** 679 * struct ef4_phy_operations - Efx PHY operations table 680 * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds, 681 * efx->loopback_modes. 682 * @init: Initialise PHY 683 * @fini: Shut down PHY 684 * @reconfigure: Reconfigure PHY (e.g. for new link parameters) 685 * @poll: Update @link_state and report whether it changed. 686 * Serialised by the mac_lock. 687 * @get_settings: Get ethtool settings. Serialised by the mac_lock. 688 * @set_settings: Set ethtool settings. Serialised by the mac_lock. 689 * @set_npage_adv: Set abilities advertised in (Extended) Next Page 690 * (only needed where AN bit is set in mmds) 691 * @test_alive: Test that PHY is 'alive' (online) 692 * @test_name: Get the name of a PHY-specific test/result 693 * @run_tests: Run tests and record results as appropriate (offline). 694 * Flags are the ethtool tests flags. 
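 *
 * Several of these hooks are documented as serialised by the mac_lock; a
 * minimal sketch of a reconfigure call under that lock (error handling
 * elided, "rc" is a local):
 *
 *	mutex_lock(&efx->mac_lock);
 *	rc = efx->phy_op->reconfigure(efx);
 *	mutex_unlock(&efx->mac_lock);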
695 */ 696 struct ef4_phy_operations { 697 int (*probe) (struct ef4_nic *efx); 698 int (*init) (struct ef4_nic *efx); 699 void (*fini) (struct ef4_nic *efx); 700 void (*remove) (struct ef4_nic *efx); 701 int (*reconfigure) (struct ef4_nic *efx); 702 bool (*poll) (struct ef4_nic *efx); 703 void (*get_settings) (struct ef4_nic *efx, 704 struct ethtool_cmd *ecmd); 705 int (*set_settings) (struct ef4_nic *efx, 706 struct ethtool_cmd *ecmd); 707 void (*set_npage_adv) (struct ef4_nic *efx, u32); 708 int (*test_alive) (struct ef4_nic *efx); 709 const char *(*test_name) (struct ef4_nic *efx, unsigned int index); 710 int (*run_tests) (struct ef4_nic *efx, int *results, unsigned flags); 711 int (*get_module_eeprom) (struct ef4_nic *efx, 712 struct ethtool_eeprom *ee, 713 u8 *data); 714 int (*get_module_info) (struct ef4_nic *efx, 715 struct ethtool_modinfo *modinfo); 716 }; 717 718 /** 719 * enum ef4_phy_mode - PHY operating mode flags 720 * @PHY_MODE_NORMAL: on and should pass traffic 721 * @PHY_MODE_TX_DISABLED: on with TX disabled 722 * @PHY_MODE_LOW_POWER: set to low power through MDIO 723 * @PHY_MODE_OFF: switched off through external control 724 * @PHY_MODE_SPECIAL: on but will not pass traffic 725 */ 726 enum ef4_phy_mode { 727 PHY_MODE_NORMAL = 0, 728 PHY_MODE_TX_DISABLED = 1, 729 PHY_MODE_LOW_POWER = 2, 730 PHY_MODE_OFF = 4, 731 PHY_MODE_SPECIAL = 8, 732 }; 733 734 static inline bool ef4_phy_mode_disabled(enum ef4_phy_mode mode) 735 { 736 return !!(mode & ~PHY_MODE_TX_DISABLED); 737 } 738 739 /** 740 * struct ef4_hw_stat_desc - Description of a hardware statistic 741 * @name: Name of the statistic as visible through ethtool, or %NULL if 742 * it should not be exposed 743 * @dma_width: Width in bits (0 for non-DMA statistics) 744 * @offset: Offset within stats (ignored for non-DMA statistics) 745 */ 746 struct ef4_hw_stat_desc { 747 const char *name; 748 u16 dma_width; 749 u16 offset; 750 }; 751 752 /* Number of bits used in a multicast filter hash address */ 753 #define EF4_MCAST_HASH_BITS 8 754 755 /* Number of (single-bit) entries in a multicast filter hash */ 756 #define EF4_MCAST_HASH_ENTRIES (1 << EF4_MCAST_HASH_BITS) 757 758 /* An Efx multicast filter hash */ 759 union ef4_multicast_hash { 760 u8 byte[EF4_MCAST_HASH_ENTRIES / 8]; 761 ef4_oword_t oword[EF4_MCAST_HASH_ENTRIES / sizeof(ef4_oword_t) / 8]; 762 }; 763 764 /** 765 * struct ef4_nic - an Efx NIC 766 * @name: Device name (net device name or bus id before net device registered) 767 * @pci_dev: The PCI device 768 * @node: List node for maintaning primary/secondary function lists 769 * @primary: &struct ef4_nic instance for the primary function of this 770 * controller. May be the same structure, and may be %NULL if no 771 * primary function is bound. Serialised by rtnl_lock. 772 * @secondary_list: List of &struct ef4_nic instances for the secondary PCI 773 * functions of the controller, if this is for the primary function. 774 * Serialised by rtnl_lock. 775 * @type: Controller type attributes 776 * @legacy_irq: IRQ number 777 * @workqueue: Workqueue for port reconfigures and the HW monitor. 778 * Work items do not hold and must not acquire RTNL. 
779 * @workqueue_name: Name of workqueue 780 * @reset_work: Scheduled reset workitem 781 * @membase_phys: Memory BAR value as physical address 782 * @membase: Memory BAR value 783 * @interrupt_mode: Interrupt mode 784 * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds 785 * @timer_max_ns: Interrupt timer maximum value, in nanoseconds 786 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues 787 * @irq_rx_mod_step_us: Step size for IRQ moderation for RX event queues 788 * @irq_rx_moderation_us: IRQ moderation time for RX event queues 789 * @msg_enable: Log message enable flags 790 * @state: Device state number (%STATE_*). Serialised by the rtnl_lock. 791 * @reset_pending: Bitmask for pending resets 792 * @tx_queue: TX DMA queues 793 * @rx_queue: RX DMA queues 794 * @channel: Channels 795 * @msi_context: Context for each MSI 796 * @extra_channel_types: Types of extra (non-traffic) channels that 797 * should be allocated for this NIC 798 * @rxq_entries: Size of receive queues requested by user. 799 * @txq_entries: Size of transmit queues requested by user. 800 * @txq_stop_thresh: TX queue fill level at or above which we stop it. 801 * @txq_wake_thresh: TX queue fill level at or below which we wake it. 802 * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches 803 * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches 804 * @sram_lim_qw: Qword address limit of SRAM 805 * @next_buffer_table: First available buffer table id 806 * @n_channels: Number of channels in use 807 * @n_rx_channels: Number of channels used for RX (= number of RX queues) 808 * @n_tx_channels: Number of channels used for TX 809 * @rx_ip_align: RX DMA address offset to have IP header aligned in 810 * in accordance with NET_IP_ALIGN 811 * @rx_dma_len: Current maximum RX DMA length 812 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 813 * @rx_buffer_truesize: Amortised allocation size of an RX buffer, 814 * for use in sk_buff::truesize 815 * @rx_prefix_size: Size of RX prefix before packet data 816 * @rx_packet_hash_offset: Offset of RX flow hash from start of packet data 817 * (valid only if @rx_prefix_size != 0; always negative) 818 * @rx_packet_len_offset: Offset of RX packet length from start of packet data 819 * (valid only for NICs that set %EF4_RX_PKT_PREFIX_LEN; always negative) 820 * @rx_packet_ts_offset: Offset of timestamp from start of packet data 821 * (valid only if channel->sync_timestamps_enabled; always negative) 822 * @rx_hash_key: Toeplitz hash key for RSS 823 * @rx_indir_table: Indirection table for RSS 824 * @rx_scatter: Scatter mode enabled for receives 825 * @int_error_count: Number of internal errors seen recently 826 * @int_error_expire: Time at which error count will be expired 827 * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will 828 * acknowledge but do nothing else. 829 * @irq_status: Interrupt status buffer 830 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 831 * @irq_level: IRQ level/index for IRQs not triggered by an event queue 832 * @selftest_work: Work item for asynchronous self-test 833 * @mtd_list: List of MTDs attached to the NIC 834 * @nic_data: Hardware dependent state 835 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, 836 * ef4_monitor() and ef4_reconfigure_port() 837 * @port_enabled: Port enabled indicator. 838 * Serialises ef4_stop_all(), ef4_start_all(), ef4_monitor() and 839 * ef4_mac_work() with kernel interfaces. 
Safe to read under any 840 * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must 841 * be held to modify it. 842 * @port_initialized: Port initialized? 843 * @net_dev: Operating system network device. Consider holding the rtnl lock 844 * @fixed_features: Features which cannot be turned off 845 * @stats_buffer: DMA buffer for statistics 846 * @phy_type: PHY type 847 * @phy_op: PHY interface 848 * @phy_data: PHY private data (including PHY-specific stats) 849 * @mdio: PHY MDIO interface 850 * @phy_mode: PHY operating mode. Serialised by @mac_lock. 851 * @link_advertising: Autonegotiation advertising flags 852 * @link_state: Current state of the link 853 * @n_link_state_changes: Number of times the link has changed state 854 * @unicast_filter: Flag for Falcon-arch simple unicast filter. 855 * Protected by @mac_lock. 856 * @multicast_hash: Multicast hash table for Falcon-arch. 857 * Protected by @mac_lock. 858 * @wanted_fc: Wanted flow control flags 859 * @fc_disable: When non-zero flow control is disabled. Typically used to 860 * ensure that network back pressure doesn't delay dma queue flushes. 861 * Serialised by the rtnl lock. 862 * @mac_work: Work item for changing MAC promiscuity and multicast hash 863 * @loopback_mode: Loopback status 864 * @loopback_modes: Supported loopback mode bitmask 865 * @loopback_selftest: Offline self-test private state 866 * @filter_sem: Filter table rw_semaphore, for freeing the table 867 * @filter_lock: Filter table lock, for mere content changes 868 * @filter_state: Architecture-dependent filter table state 869 * @rps_expire_channel: Next channel to check for expiry 870 * @rps_expire_index: Next index to check for expiry in 871 * @rps_expire_channel's @rps_flow_id 872 * @active_queues: Count of RX and TX queues that haven't been flushed and drained. 873 * @rxq_flush_pending: Count of number of receive queues that need to be flushed. 874 * Decremented when the ef4_flush_rx_queue() is called. 875 * @rxq_flush_outstanding: Count of number of RX flushes started but not yet 876 * completed (either success or failure). Not used when MCDI is used to 877 * flush receive queues. 878 * @flush_wq: wait queue used by ef4_nic_flush_queues() to wait for flush completions. 879 * @vpd_sn: Serial number read from VPD 880 * @monitor_work: Hardware monitor workitem 881 * @biu_lock: BIU (bus interface unit) lock 882 * @last_irq_cpu: Last CPU to handle a possible test interrupt. This 883 * field is used by ef4_test_interrupts() to verify that an 884 * interrupt has occurred. 885 * @stats_lock: Statistics update lock. Must be held when calling 886 * ef4_nic_type::{update,start,stop}_stats. 887 * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb 888 * 889 * This is stored in the private area of the &struct net_device. 
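 *
 * A minimal sketch of recovering it from a &struct net_device:
 *
 *	struct ef4_nic *efx = netdev_priv(net_dev);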
890 */ 891 struct ef4_nic { 892 /* The following fields should be written very rarely */ 893 894 char name[IFNAMSIZ]; 895 struct list_head node; 896 struct ef4_nic *primary; 897 struct list_head secondary_list; 898 struct pci_dev *pci_dev; 899 unsigned int port_num; 900 const struct ef4_nic_type *type; 901 int legacy_irq; 902 bool eeh_disabled_legacy_irq; 903 struct workqueue_struct *workqueue; 904 char workqueue_name[16]; 905 struct work_struct reset_work; 906 resource_size_t membase_phys; 907 void __iomem *membase; 908 909 enum ef4_int_mode interrupt_mode; 910 unsigned int timer_quantum_ns; 911 unsigned int timer_max_ns; 912 bool irq_rx_adaptive; 913 unsigned int irq_mod_step_us; 914 unsigned int irq_rx_moderation_us; 915 u32 msg_enable; 916 917 enum nic_state state; 918 unsigned long reset_pending; 919 920 struct ef4_channel *channel[EF4_MAX_CHANNELS]; 921 struct ef4_msi_context msi_context[EF4_MAX_CHANNELS]; 922 const struct ef4_channel_type * 923 extra_channel_type[EF4_MAX_EXTRA_CHANNELS]; 924 925 unsigned rxq_entries; 926 unsigned txq_entries; 927 unsigned int txq_stop_thresh; 928 unsigned int txq_wake_thresh; 929 930 unsigned tx_dc_base; 931 unsigned rx_dc_base; 932 unsigned sram_lim_qw; 933 unsigned next_buffer_table; 934 935 unsigned int max_channels; 936 unsigned int max_tx_channels; 937 unsigned n_channels; 938 unsigned n_rx_channels; 939 unsigned rss_spread; 940 unsigned tx_channel_offset; 941 unsigned n_tx_channels; 942 unsigned int rx_ip_align; 943 unsigned int rx_dma_len; 944 unsigned int rx_buffer_order; 945 unsigned int rx_buffer_truesize; 946 unsigned int rx_page_buf_step; 947 unsigned int rx_bufs_per_page; 948 unsigned int rx_pages_per_batch; 949 unsigned int rx_prefix_size; 950 int rx_packet_hash_offset; 951 int rx_packet_len_offset; 952 int rx_packet_ts_offset; 953 u8 rx_hash_key[40]; 954 u32 rx_indir_table[128]; 955 bool rx_scatter; 956 957 unsigned int_error_count; 958 unsigned long int_error_expire; 959 960 bool irq_soft_enabled; 961 struct ef4_buffer irq_status; 962 unsigned irq_zero_count; 963 unsigned irq_level; 964 struct delayed_work selftest_work; 965 966 #ifdef CONFIG_SFC_FALCON_MTD 967 struct list_head mtd_list; 968 #endif 969 970 void *nic_data; 971 972 struct mutex mac_lock; 973 struct work_struct mac_work; 974 bool port_enabled; 975 976 bool mc_bist_for_other_fn; 977 bool port_initialized; 978 struct net_device *net_dev; 979 980 netdev_features_t fixed_features; 981 982 struct ef4_buffer stats_buffer; 983 u64 rx_nodesc_drops_total; 984 u64 rx_nodesc_drops_while_down; 985 bool rx_nodesc_drops_prev_state; 986 987 unsigned int phy_type; 988 const struct ef4_phy_operations *phy_op; 989 void *phy_data; 990 struct mdio_if_info mdio; 991 enum ef4_phy_mode phy_mode; 992 993 u32 link_advertising; 994 struct ef4_link_state link_state; 995 unsigned int n_link_state_changes; 996 997 bool unicast_filter; 998 union ef4_multicast_hash multicast_hash; 999 u8 wanted_fc; 1000 unsigned fc_disable; 1001 1002 atomic_t rx_reset; 1003 enum ef4_loopback_mode loopback_mode; 1004 u64 loopback_modes; 1005 1006 void *loopback_selftest; 1007 1008 struct rw_semaphore filter_sem; 1009 spinlock_t filter_lock; 1010 void *filter_state; 1011 #ifdef CONFIG_RFS_ACCEL 1012 unsigned int rps_expire_channel; 1013 unsigned int rps_expire_index; 1014 #endif 1015 1016 atomic_t active_queues; 1017 atomic_t rxq_flush_pending; 1018 atomic_t rxq_flush_outstanding; 1019 wait_queue_head_t flush_wq; 1020 1021 char *vpd_sn; 1022 1023 /* The following fields may be written more often */ 1024 1025 struct 
delayed_work monitor_work ____cacheline_aligned_in_smp; 1026 spinlock_t biu_lock; 1027 int last_irq_cpu; 1028 spinlock_t stats_lock; 1029 atomic_t n_rx_noskb_drops; 1030 }; 1031 1032 static inline int ef4_dev_registered(struct ef4_nic *efx) 1033 { 1034 return efx->net_dev->reg_state == NETREG_REGISTERED; 1035 } 1036 1037 static inline unsigned int ef4_port_num(struct ef4_nic *efx) 1038 { 1039 return efx->port_num; 1040 } 1041 1042 struct ef4_mtd_partition { 1043 struct list_head node; 1044 struct mtd_info mtd; 1045 const char *dev_type_name; 1046 const char *type_name; 1047 char name[IFNAMSIZ + 20]; 1048 }; 1049 1050 /** 1051 * struct ef4_nic_type - Efx device type definition 1052 * @mem_bar: Get the memory BAR 1053 * @mem_map_size: Get memory BAR mapped size 1054 * @probe: Probe the controller 1055 * @remove: Free resources allocated by probe() 1056 * @init: Initialise the controller 1057 * @dimension_resources: Dimension controller resources (buffer table, 1058 * and VIs once the available interrupt resources are clear) 1059 * @fini: Shut down the controller 1060 * @monitor: Periodic function for polling link state and hardware monitor 1061 * @map_reset_reason: Map ethtool reset reason to a reset method 1062 * @map_reset_flags: Map ethtool reset flags to a reset method, if possible 1063 * @reset: Reset the controller hardware and possibly the PHY. This will 1064 * be called while the controller is uninitialised. 1065 * @probe_port: Probe the MAC and PHY 1066 * @remove_port: Free resources allocated by probe_port() 1067 * @handle_global_event: Handle a "global" event (may be %NULL) 1068 * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues) 1069 * @prepare_flush: Prepare the hardware for flushing the DMA queues 1070 * (for Falcon architecture) 1071 * @finish_flush: Clean up after flushing the DMA queues (for Falcon 1072 * architecture) 1073 * @prepare_flr: Prepare for an FLR 1074 * @finish_flr: Clean up after an FLR 1075 * @describe_stats: Describe statistics for ethtool 1076 * @update_stats: Update statistics not provided by event handling. 1077 * Either argument may be %NULL. 1078 * @start_stats: Start the regular fetching of statistics 1079 * @pull_stats: Pull stats from the NIC and wait until they arrive. 1080 * @stop_stats: Stop the regular fetching of statistics 1081 * @set_id_led: Set state of identifying LED or revert to automatic function 1082 * @push_irq_moderation: Apply interrupt moderation value 1083 * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY 1084 * @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL) 1085 * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings 1086 * to the hardware. Serialised by the mac_lock. 1087 * @check_mac_fault: Check MAC fault state. True if fault present. 1088 * @get_wol: Get WoL configuration from driver state 1089 * @set_wol: Push WoL configuration to the NIC 1090 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) 1091 * @test_chip: Test registers. May use ef4_farch_test_registers(), and is 1092 * expected to reset the NIC. 1093 * @test_nvram: Test validity of NVRAM contents 1094 * @irq_enable_master: Enable IRQs on the NIC. Each event queue must 1095 * be separately enabled after this. 1096 * @irq_test_generate: Generate a test IRQ 1097 * @irq_disable_non_ev: Disable non-event IRQs on the NIC. Each event 1098 * queue must be separately disabled before this. 1099 * @irq_handle_msi: Handle MSI for a channel. 
The @dev_id argument is 1100 * a pointer to the &struct ef4_msi_context for the channel. 1101 * @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument 1102 * is a pointer to the &struct ef4_nic. 1103 * @tx_probe: Allocate resources for TX queue 1104 * @tx_init: Initialise TX queue on the NIC 1105 * @tx_remove: Free resources for TX queue 1106 * @tx_write: Write TX descriptors and doorbell 1107 * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC 1108 * @rx_probe: Allocate resources for RX queue 1109 * @rx_init: Initialise RX queue on the NIC 1110 * @rx_remove: Free resources for RX queue 1111 * @rx_write: Write RX descriptors and doorbell 1112 * @rx_defer_refill: Generate a refill reminder event 1113 * @ev_probe: Allocate resources for event queue 1114 * @ev_init: Initialise event queue on the NIC 1115 * @ev_fini: Deinitialise event queue on the NIC 1116 * @ev_remove: Free resources for event queue 1117 * @ev_process: Process events for a queue, up to the given NAPI quota 1118 * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ 1119 * @ev_test_generate: Generate a test event 1120 * @filter_table_probe: Probe filter capabilities and set up filter software state 1121 * @filter_table_restore: Restore filters removed from hardware 1122 * @filter_table_remove: Remove filters from hardware and tear down software state 1123 * @filter_update_rx_scatter: Update filters after change to rx scatter setting 1124 * @filter_insert: add or replace a filter 1125 * @filter_remove_safe: remove a filter by ID, carefully 1126 * @filter_get_safe: retrieve a filter by ID, carefully 1127 * @filter_clear_rx: Remove all RX filters whose priority is less than or 1128 * equal to the given priority and is not %EF4_FILTER_PRI_AUTO 1129 * @filter_count_rx_used: Get the number of filters in use at a given priority 1130 * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1 1131 * @filter_get_rx_ids: Get list of RX filters at a given priority 1132 * @filter_rfs_insert: Add or replace a filter for RFS. This must be 1133 * atomic. The hardware change may be asynchronous but should 1134 * not be delayed for long. It may fail if this can't be done 1135 * atomically. 1136 * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS. 1137 * This must check whether the specified table entry is used by RFS 1138 * and that rps_may_expire_flow() returns true for it. 1139 * @mtd_probe: Probe and add MTD partitions associated with this net device, 1140 * using ef4_mtd_add() 1141 * @mtd_rename: Set an MTD partition name using the net device name 1142 * @mtd_read: Read from an MTD partition 1143 * @mtd_erase: Erase part of an MTD partition 1144 * @mtd_write: Write to an MTD partition 1145 * @mtd_sync: Wait for write-back to complete on MTD partition. This 1146 * also notifies the driver that a writer has finished using this 1147 * partition. 
1148 * @set_mac_address: Set the MAC address of the device 1149 * @revision: Hardware architecture revision 1150 * @txd_ptr_tbl_base: TX descriptor ring base address 1151 * @rxd_ptr_tbl_base: RX descriptor ring base address 1152 * @buf_tbl_base: Buffer table base address 1153 * @evq_ptr_tbl_base: Event queue pointer table base address 1154 * @evq_rptr_tbl_base: Event queue read-pointer table base address 1155 * @max_dma_mask: Maximum possible DMA mask 1156 * @rx_prefix_size: Size of RX prefix before packet data 1157 * @rx_hash_offset: Offset of RX flow hash within prefix 1158 * @rx_ts_offset: Offset of timestamp within prefix 1159 * @rx_buffer_padding: Size of padding at end of RX packet 1160 * @can_rx_scatter: NIC is able to scatter packets to multiple buffers 1161 * @always_rx_scatter: NIC will always scatter packets to multiple buffers 1162 * @max_interrupt_mode: Highest capability interrupt mode supported 1163 * from &enum ef4_init_mode. 1164 * @timer_period_max: Maximum period of interrupt timer (in ticks) 1165 * @offload_features: net_device feature flags for protocol offload 1166 * features implemented in hardware 1167 */ 1168 struct ef4_nic_type { 1169 unsigned int mem_bar; 1170 unsigned int (*mem_map_size)(struct ef4_nic *efx); 1171 int (*probe)(struct ef4_nic *efx); 1172 void (*remove)(struct ef4_nic *efx); 1173 int (*init)(struct ef4_nic *efx); 1174 int (*dimension_resources)(struct ef4_nic *efx); 1175 void (*fini)(struct ef4_nic *efx); 1176 void (*monitor)(struct ef4_nic *efx); 1177 enum reset_type (*map_reset_reason)(enum reset_type reason); 1178 int (*map_reset_flags)(u32 *flags); 1179 int (*reset)(struct ef4_nic *efx, enum reset_type method); 1180 int (*probe_port)(struct ef4_nic *efx); 1181 void (*remove_port)(struct ef4_nic *efx); 1182 bool (*handle_global_event)(struct ef4_channel *channel, ef4_qword_t *); 1183 int (*fini_dmaq)(struct ef4_nic *efx); 1184 void (*prepare_flush)(struct ef4_nic *efx); 1185 void (*finish_flush)(struct ef4_nic *efx); 1186 void (*prepare_flr)(struct ef4_nic *efx); 1187 void (*finish_flr)(struct ef4_nic *efx); 1188 size_t (*describe_stats)(struct ef4_nic *efx, u8 *names); 1189 size_t (*update_stats)(struct ef4_nic *efx, u64 *full_stats, 1190 struct rtnl_link_stats64 *core_stats); 1191 void (*start_stats)(struct ef4_nic *efx); 1192 void (*pull_stats)(struct ef4_nic *efx); 1193 void (*stop_stats)(struct ef4_nic *efx); 1194 void (*set_id_led)(struct ef4_nic *efx, enum ef4_led_mode mode); 1195 void (*push_irq_moderation)(struct ef4_channel *channel); 1196 int (*reconfigure_port)(struct ef4_nic *efx); 1197 void (*prepare_enable_fc_tx)(struct ef4_nic *efx); 1198 int (*reconfigure_mac)(struct ef4_nic *efx); 1199 bool (*check_mac_fault)(struct ef4_nic *efx); 1200 void (*get_wol)(struct ef4_nic *efx, struct ethtool_wolinfo *wol); 1201 int (*set_wol)(struct ef4_nic *efx, u32 type); 1202 void (*resume_wol)(struct ef4_nic *efx); 1203 int (*test_chip)(struct ef4_nic *efx, struct ef4_self_tests *tests); 1204 int (*test_nvram)(struct ef4_nic *efx); 1205 void (*irq_enable_master)(struct ef4_nic *efx); 1206 int (*irq_test_generate)(struct ef4_nic *efx); 1207 void (*irq_disable_non_ev)(struct ef4_nic *efx); 1208 irqreturn_t (*irq_handle_msi)(int irq, void *dev_id); 1209 irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id); 1210 int (*tx_probe)(struct ef4_tx_queue *tx_queue); 1211 void (*tx_init)(struct ef4_tx_queue *tx_queue); 1212 void (*tx_remove)(struct ef4_tx_queue *tx_queue); 1213 void (*tx_write)(struct ef4_tx_queue *tx_queue); 1214 unsigned int 
(*tx_limit_len)(struct ef4_tx_queue *tx_queue, 1215 dma_addr_t dma_addr, unsigned int len); 1216 int (*rx_push_rss_config)(struct ef4_nic *efx, bool user, 1217 const u32 *rx_indir_table); 1218 int (*rx_probe)(struct ef4_rx_queue *rx_queue); 1219 void (*rx_init)(struct ef4_rx_queue *rx_queue); 1220 void (*rx_remove)(struct ef4_rx_queue *rx_queue); 1221 void (*rx_write)(struct ef4_rx_queue *rx_queue); 1222 void (*rx_defer_refill)(struct ef4_rx_queue *rx_queue); 1223 int (*ev_probe)(struct ef4_channel *channel); 1224 int (*ev_init)(struct ef4_channel *channel); 1225 void (*ev_fini)(struct ef4_channel *channel); 1226 void (*ev_remove)(struct ef4_channel *channel); 1227 int (*ev_process)(struct ef4_channel *channel, int quota); 1228 void (*ev_read_ack)(struct ef4_channel *channel); 1229 void (*ev_test_generate)(struct ef4_channel *channel); 1230 int (*filter_table_probe)(struct ef4_nic *efx); 1231 void (*filter_table_restore)(struct ef4_nic *efx); 1232 void (*filter_table_remove)(struct ef4_nic *efx); 1233 void (*filter_update_rx_scatter)(struct ef4_nic *efx); 1234 s32 (*filter_insert)(struct ef4_nic *efx, 1235 struct ef4_filter_spec *spec, bool replace); 1236 int (*filter_remove_safe)(struct ef4_nic *efx, 1237 enum ef4_filter_priority priority, 1238 u32 filter_id); 1239 int (*filter_get_safe)(struct ef4_nic *efx, 1240 enum ef4_filter_priority priority, 1241 u32 filter_id, struct ef4_filter_spec *); 1242 int (*filter_clear_rx)(struct ef4_nic *efx, 1243 enum ef4_filter_priority priority); 1244 u32 (*filter_count_rx_used)(struct ef4_nic *efx, 1245 enum ef4_filter_priority priority); 1246 u32 (*filter_get_rx_id_limit)(struct ef4_nic *efx); 1247 s32 (*filter_get_rx_ids)(struct ef4_nic *efx, 1248 enum ef4_filter_priority priority, 1249 u32 *buf, u32 size); 1250 #ifdef CONFIG_RFS_ACCEL 1251 s32 (*filter_rfs_insert)(struct ef4_nic *efx, 1252 struct ef4_filter_spec *spec); 1253 bool (*filter_rfs_expire_one)(struct ef4_nic *efx, u32 flow_id, 1254 unsigned int index); 1255 #endif 1256 #ifdef CONFIG_SFC_FALCON_MTD 1257 int (*mtd_probe)(struct ef4_nic *efx); 1258 void (*mtd_rename)(struct ef4_mtd_partition *part); 1259 int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len, 1260 size_t *retlen, u8 *buffer); 1261 int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len); 1262 int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len, 1263 size_t *retlen, const u8 *buffer); 1264 int (*mtd_sync)(struct mtd_info *mtd); 1265 #endif 1266 int (*get_mac_address)(struct ef4_nic *efx, unsigned char *perm_addr); 1267 int (*set_mac_address)(struct ef4_nic *efx); 1268 1269 int revision; 1270 unsigned int txd_ptr_tbl_base; 1271 unsigned int rxd_ptr_tbl_base; 1272 unsigned int buf_tbl_base; 1273 unsigned int evq_ptr_tbl_base; 1274 unsigned int evq_rptr_tbl_base; 1275 u64 max_dma_mask; 1276 unsigned int rx_prefix_size; 1277 unsigned int rx_hash_offset; 1278 unsigned int rx_ts_offset; 1279 unsigned int rx_buffer_padding; 1280 bool can_rx_scatter; 1281 bool always_rx_scatter; 1282 unsigned int max_interrupt_mode; 1283 unsigned int timer_period_max; 1284 netdev_features_t offload_features; 1285 unsigned int max_rx_ip_filters; 1286 }; 1287 1288 /************************************************************************** 1289 * 1290 * Prototypes and inline functions 1291 * 1292 *************************************************************************/ 1293 1294 static inline struct ef4_channel * 1295 ef4_get_channel(struct ef4_nic *efx, unsigned index) 1296 { 1297 EF4_BUG_ON_PARANOID(index >= 
efx->n_channels); 1298 return efx->channel[index]; 1299 } 1300 1301 /* Iterate over all used channels */ 1302 #define ef4_for_each_channel(_channel, _efx) \ 1303 for (_channel = (_efx)->channel[0]; \ 1304 _channel; \ 1305 _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \ 1306 (_efx)->channel[_channel->channel + 1] : NULL) 1307 1308 /* Iterate over all used channels in reverse */ 1309 #define ef4_for_each_channel_rev(_channel, _efx) \ 1310 for (_channel = (_efx)->channel[(_efx)->n_channels - 1]; \ 1311 _channel; \ 1312 _channel = _channel->channel ? \ 1313 (_efx)->channel[_channel->channel - 1] : NULL) 1314 1315 static inline struct ef4_tx_queue * 1316 ef4_get_tx_queue(struct ef4_nic *efx, unsigned index, unsigned type) 1317 { 1318 EF4_BUG_ON_PARANOID(index >= efx->n_tx_channels || 1319 type >= EF4_TXQ_TYPES); 1320 return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type]; 1321 } 1322 1323 static inline bool ef4_channel_has_tx_queues(struct ef4_channel *channel) 1324 { 1325 return channel->channel - channel->efx->tx_channel_offset < 1326 channel->efx->n_tx_channels; 1327 } 1328 1329 static inline struct ef4_tx_queue * 1330 ef4_channel_get_tx_queue(struct ef4_channel *channel, unsigned type) 1331 { 1332 EF4_BUG_ON_PARANOID(!ef4_channel_has_tx_queues(channel) || 1333 type >= EF4_TXQ_TYPES); 1334 return &channel->tx_queue[type]; 1335 } 1336 1337 static inline bool ef4_tx_queue_used(struct ef4_tx_queue *tx_queue) 1338 { 1339 return !(tx_queue->efx->net_dev->num_tc < 2 && 1340 tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI); 1341 } 1342 1343 /* Iterate over all TX queues belonging to a channel */ 1344 #define ef4_for_each_channel_tx_queue(_tx_queue, _channel) \ 1345 if (!ef4_channel_has_tx_queues(_channel)) \ 1346 ; \ 1347 else \ 1348 for (_tx_queue = (_channel)->tx_queue; \ 1349 _tx_queue < (_channel)->tx_queue + EF4_TXQ_TYPES && \ 1350 ef4_tx_queue_used(_tx_queue); \ 1351 _tx_queue++) 1352 1353 /* Iterate over all possible TX queues belonging to a channel */ 1354 #define ef4_for_each_possible_channel_tx_queue(_tx_queue, _channel) \ 1355 if (!ef4_channel_has_tx_queues(_channel)) \ 1356 ; \ 1357 else \ 1358 for (_tx_queue = (_channel)->tx_queue; \ 1359 _tx_queue < (_channel)->tx_queue + EF4_TXQ_TYPES; \ 1360 _tx_queue++) 1361 1362 static inline bool ef4_channel_has_rx_queue(struct ef4_channel *channel) 1363 { 1364 return channel->rx_queue.core_index >= 0; 1365 } 1366 1367 static inline struct ef4_rx_queue * 1368 ef4_channel_get_rx_queue(struct ef4_channel *channel) 1369 { 1370 EF4_BUG_ON_PARANOID(!ef4_channel_has_rx_queue(channel)); 1371 return &channel->rx_queue; 1372 } 1373 1374 /* Iterate over all RX queues belonging to a channel */ 1375 #define ef4_for_each_channel_rx_queue(_rx_queue, _channel) \ 1376 if (!ef4_channel_has_rx_queue(_channel)) \ 1377 ; \ 1378 else \ 1379 for (_rx_queue = &(_channel)->rx_queue; \ 1380 _rx_queue; \ 1381 _rx_queue = NULL) 1382 1383 static inline struct ef4_channel * 1384 ef4_rx_queue_channel(struct ef4_rx_queue *rx_queue) 1385 { 1386 return container_of(rx_queue, struct ef4_channel, rx_queue); 1387 } 1388 1389 static inline int ef4_rx_queue_index(struct ef4_rx_queue *rx_queue) 1390 { 1391 return ef4_rx_queue_channel(rx_queue)->channel; 1392 } 1393 1394 /* Returns a pointer to the specified receive buffer in the RX 1395 * descriptor queue. 
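 *
 * A minimal usage sketch, masking a free-running counter down to a ring
 * index (the choice of removed_count is illustrative):
 *
 *	rx_buf = ef4_rx_buffer(rx_queue,
 *			       rx_queue->removed_count & rx_queue->ptr_mask);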
1396 */ 1397 static inline struct ef4_rx_buffer *ef4_rx_buffer(struct ef4_rx_queue *rx_queue, 1398 unsigned int index) 1399 { 1400 return &rx_queue->buffer[index]; 1401 } 1402 1403 /** 1404 * EF4_MAX_FRAME_LEN - calculate maximum frame length 1405 * 1406 * This calculates the maximum frame length that will be used for a 1407 * given MTU. The frame length will be equal to the MTU plus a 1408 * constant amount of header space and padding. This is the quantity 1409 * that the net driver will program into the MAC as the maximum frame 1410 * length. 1411 * 1412 * The 10G MAC requires 8-byte alignment on the frame 1413 * length, so we round up to the nearest 8. 1414 * 1415 * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an 1416 * XGMII cycle). If the frame length reaches the maximum value in the 1417 * same cycle, the XMAC can miss the IPG altogether. We work around 1418 * this by adding a further 16 bytes. 1419 */ 1420 #define EF4_FRAME_PAD 16 1421 #define EF4_MAX_FRAME_LEN(mtu) \ 1422 (ALIGN(((mtu) + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN + EF4_FRAME_PAD), 8)) 1423 1424 /* Get all supported features. 1425 * If a feature is not fixed, it is present in hw_features. 1426 * If a feature is fixed, it is not present in hw_features, but 1427 * is always present in features. 1428 */ 1429 static inline netdev_features_t ef4_supported_features(const struct ef4_nic *efx) 1430 { 1431 const struct net_device *net_dev = efx->net_dev; 1432 1433 return net_dev->features | net_dev->hw_features; 1434 } 1435 1436 /* Get the current TX queue insert index. */ 1437 static inline unsigned int 1438 ef4_tx_queue_get_insert_index(const struct ef4_tx_queue *tx_queue) 1439 { 1440 return tx_queue->insert_count & tx_queue->ptr_mask; 1441 } 1442 1443 /* Get a TX buffer. */ 1444 static inline struct ef4_tx_buffer * 1445 __ef4_tx_queue_get_insert_buffer(const struct ef4_tx_queue *tx_queue) 1446 { 1447 return &tx_queue->buffer[ef4_tx_queue_get_insert_index(tx_queue)]; 1448 } 1449 1450 /* Get a TX buffer, checking it's not currently in use. */ 1451 static inline struct ef4_tx_buffer * 1452 ef4_tx_queue_get_insert_buffer(const struct ef4_tx_queue *tx_queue) 1453 { 1454 struct ef4_tx_buffer *buffer = 1455 __ef4_tx_queue_get_insert_buffer(tx_queue); 1456 1457 EF4_BUG_ON_PARANOID(buffer->len); 1458 EF4_BUG_ON_PARANOID(buffer->flags); 1459 EF4_BUG_ON_PARANOID(buffer->unmap_len); 1460 1461 return buffer; 1462 } 1463 1464 #endif /* EF4_NET_DRIVER_H */ 1465
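/*
 * Worked example for EF4_MAX_FRAME_LEN() above (illustrative only): with a
 * standard 1500-byte MTU,
 *
 *	EF4_MAX_FRAME_LEN(1500)
 *		= ALIGN(1500 + ETH_HLEN (14) + VLAN_HLEN (4) + ETH_FCS_LEN (4)
 *			+ EF4_FRAME_PAD (16), 8)
 *		= ALIGN(1538, 8)
 *		= 1544 bytes
 *
 * which is the value programmed into the MAC as the maximum frame length.
 */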