/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042

#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC 4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM	6
#define GVE_RX_STATS_REPORT_NUM	2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD	20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM	0
#define NIC_RX_STATS_REPORT_NUM	4

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES	1024

#define GVE_RX_BUFFER_SIZE_DQO 2048

#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
	u16 pad; /* adjustment for rx padding */
	u8 can_flip; /* tracks if the networking stack is using the page */
};

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of slots which did not have a buffer posted yet. We should not
	 * post more buffers than the queue size to avoid HW overrunning the
	 * queue.
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};
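/* Illustrative sketch of the "generation bit" protocol described above. The
 * descriptor type and helper below are hypothetical (the real completion
 * descriptor layout lives in gve_desc_dqo.h); they only show how a consumer
 * typically decides a descriptor is new: its generation bit differs from the
 * generation the consumer currently expects, and the expected bit flips each
 * time the ring wraps.
 */
struct gve_example_gen_bit_desc {
	u8 generation; /* hypothetical field carrying the generation bit */
};

static inline bool
gve_example_desc_is_new(const struct gve_example_gen_bit_desc *desc,
			u8 cur_gen_bit)
{
	return (desc->generation & 1) != cur_gen_bit;
}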
/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
	s16 head;
	s16 tail;
};
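/* Illustrative sketch (hypothetical helper, not part of the driver): lists
 * built on gve_index_list chain elements through an s16 `next` field and use
 * -1 as the empty/terminator value. Popping the head element looks roughly
 * like this, assuming `elems` is the backing array (e.g. buf_states or
 * pending_packets in the structures below).
 */
struct gve_example_list_elem {
	s16 next; /* index of the next element, or -1 */
};

static inline s16 gve_example_index_list_pop(struct gve_index_list *list,
					     struct gve_example_list_elem *elems)
{
	s16 index = list->head;

	if (index == -1)
		return -1;
	list->head = elems[index].next;
	if (list->head == -1)
		list->tail = -1;
	return index;
}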
/* A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
	/* head and tail of skb chain for the current packet or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
	u32 total_size;
	u8 frag_cnt;
	bool drop_pkt;
};

struct gve_rx_cnts {
	u32 ok_pkt_bytes;
	u16 ok_pkt_cnt;
	u16 total_pkt_cnt;
	u16 cont_pkt_cnt;
	u16 desc_err_pkt_cnt;
};

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;
	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;
			u16 packet_buffer_size;

			u32 qpl_copy_pool_mask;
			u32 qpl_copy_pool_head;
			struct gve_rx_slot_page_info *qpl_copy_pool;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;
		} dqo;
	};

	u64 rbytes; /* free-running bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */

	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */

	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
};

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	struct sk_buff *skb; /* skb for this pkt */
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma);
			DEFINE_DMA_UNMAP_LEN(len);
		};
	};
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};
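/* Illustrative sketch (hypothetical helper, not the driver's FIFO code): when
 * an allocation has to be contiguous but would run past the end of the FIFO,
 * the bytes left before the wrap point can be skipped as padding, which is
 * what the iov_padding field above accounts for.
 */
static inline u32 gve_example_fifo_pad_to_wrap(const struct gve_tx_fifo *fifo,
					       u32 bytes)
{
	u32 tail_space = fifo->size - fifo->head;

	/* No padding is needed if the request fits before the wrap point. */
	return bytes <= tail_space ? 0 : tail_space;
}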
/* TX descriptor for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
	/* Packet is in free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet has received a miss completion and is expecting a
	 * re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
};

struct gve_tx_pending_packet_dqo {
	struct sk_buff *skb; /* skb for this packet */

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`.
	 */
	DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
	DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;

	/* Linked list index to prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceeds timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};
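/* Illustrative sketch (hypothetical helper, not the driver's completion
 * path): unmapping the buffers of a pending packet follows the rule stated
 * in the struct comment above - buffer 0 is the skb's linear portion and is
 * unmapped with dma_unmap_single(), the rest are frags and are unmapped with
 * dma_unmap_page().
 */
static inline void
gve_example_unmap_pending_packet(struct device *dev,
				 struct gve_tx_pending_packet_dqo *pkt)
{
	int i;

	for (i = 0; i < pkt->num_bufs; i++) {
		if (i == 0)
			dma_unmap_single(dev, dma_unmap_addr(pkt, dma[i]),
					 dma_unmap_len(pkt, len[i]),
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
				       dma_unmap_len(pkt, len[i]),
				       DMA_TO_DEVICE);
	}
}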
/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets).
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;
			u32 tail; /* Last posted buffer index + 1 */

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	union {
		/* GQI fields */
		struct {
			/* Spinlock for when cleanup in progress */
			spinlock_t clean_lock;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read on compl_desc */

			/* Tracks the current gen bit of compl_q */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) runs out, this list
			 * will be stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			/* List to track pending packets which received a miss
			 * completion but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;
		} dqo_compl;
	} ____cacheline_aligned;
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u32 complq_mask; /* complq size is complq_mask + 1 */
		} dqo;
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 queue_timeout; /* count of queue timeouts */
	u32 ntfy_id; /* notification block index */
	u32 last_kick_msec; /* Last time the queue was kicked */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
} ____cacheline_aligned;

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 *irq_db_index; /* pointer to idx into Bar2 */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
};

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_options_dqo_rda {
	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};

struct gve_irq_db {
	__be32 index;
} ____cacheline_aligned;

struct gve_ptype {
	u8 l3_type; /* `gve_l3_type` in gve_adminq.h */
	u8 l4_type; /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
	GVE_GQI_RDA_FORMAT		= 0x1,
	GVE_GQI_QPL_FORMAT		= 0x2,
	GVE_DQO_RDA_FORMAT		= 0x3,
};
struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_queue_page_list *qpls; /* array of num qpls */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
	dma_addr_t irq_db_indices_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 tx_pages_per_qpl; /* tx buffer length */
	u16 rx_data_slot_cnt; /* rx buffer length */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable; /* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmd timeouts */
	/* free-running count of each AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_get_ptype_map_cnt;
	u32 adminq_verify_driver_compatibility_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of resets */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	u32 suspend_cnt; /* count of times suspended */
	u32 resume_cnt; /* count of times resumed */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;
	bool up_before_suspend; /* True if dev was up before suspend */

	struct gve_options_dqo_rda options_dqo_rda;
	struct gve_ptype_lut *ptype_lut_dqo;

	/* Must be a power of two. */
	int data_buffer_size_dqo;

	enum gve_queue_format queue_format;

	/* Interrupt coalescing settings */
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET			= 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS		= 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
};
static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the ntfy_blocks irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
}
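/* Illustrative usage (hypothetical snippet, not part of the driver): doorbell
 * entries in BAR2 are big-endian, so a value is typically written to the
 * address returned above with a big-endian MMIO write, e.g.:
 *
 *	iowrite32be(val, gve_irq_doorbell(priv, block));
 */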
/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}

/* Returns the number of tx queue page lists
 */
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->tx_cfg.num_queues;
}

/* Returns the number of rx queue page lists
 */
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->rx_cfg.num_queues;
}

/* Returns a pointer to the next available tx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
{
	int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
				     priv->qpl_cfg.qpl_map_size);

	/* we are out of tx qpls */
	if (id >= gve_num_tx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Returns a pointer to the next available rx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
{
	int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
				    priv->qpl_cfg.qpl_map_size,
				    gve_num_tx_qpls(priv));

	/* we are out of rx qpls */
	if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Unassigns the qpl with the given id
 */
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}
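/* Illustrative example of the QPL id layout (hypothetical queue counts): in
 * GQI_QPL mode with 4 TX and 4 RX queues, gve_assign_tx_qpl() hands out ids
 * 0-3 and gve_assign_rx_qpl() hands out ids 4-7, so TX QPLs always occupy
 * the low ids. gve_qpl_dma_dir() below relies on that ordering to pick the
 * DMA direction.
 */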
/* Returns the correct dma direction for tx and rx qpls
 */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_num_tx_qpls(priv))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings_gqi(struct gve_priv *priv);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern const char gve_version_str[];
#endif /* _GVE_H_ */