1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
2  * Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2021 Google, Inc.
5  */
6 
7 #ifndef _GVE_H_
8 #define _GVE_H_
9 
10 #include <linux/dma-mapping.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/u64_stats_sync.h>
14 
15 #include "gve_desc.h"
16 #include "gve_desc_dqo.h"
17 
18 #ifndef PCI_VENDOR_ID_GOOGLE
19 #define PCI_VENDOR_ID_GOOGLE	0x1ae0
20 #endif
21 
22 #define PCI_DEV_ID_GVNIC	0x0042
23 
24 #define GVE_REGISTER_BAR	0
25 #define GVE_DOORBELL_BAR	2
26 
27 /* Driver can alloc up to 2 segments for the header and 2 for the payload. */
28 #define GVE_TX_MAX_IOVEC	4
29 /* 1 for management, 1 for rx, 1 for tx */
30 #define GVE_MIN_MSIX 3
31 
32 /* Number of gve tx/rx stats in the stats report. */
33 #define GVE_TX_STATS_REPORT_NUM	6
34 #define GVE_RX_STATS_REPORT_NUM	2
35 
36 /* Interval to schedule a stats report update, 20000ms. */
37 #define GVE_STATS_REPORT_TIMER_PERIOD	20000
38 
39 /* Number of NIC tx/rx stats in the stats report. */
40 #define NIC_TX_STATS_REPORT_NUM	0
41 #define NIC_RX_STATS_REPORT_NUM	4
42 
43 #define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
44 
45 /* PTYPEs are always 10 bits. */
46 #define GVE_NUM_PTYPES	1024
47 
48 #define GVE_RX_BUFFER_SIZE_DQO 2048
49 
50 #define GVE_XDP_ACTIONS 5
51 
52 #define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
53 
54 /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
55 struct gve_rx_desc_queue {
56 	struct gve_rx_desc *desc_ring; /* the descriptor ring */
57 	dma_addr_t bus; /* the bus for the desc_ring */
58 	u8 seqno; /* the next expected seqno for this desc */
59 };
60 
61 /* The page info for a single slot in the RX data queue */
62 struct gve_rx_slot_page_info {
63 	struct page *page;
64 	void *page_address;
65 	u32 page_offset; /* offset to write to in page */
66 	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
67 	u16 pad; /* adjustment for rx padding */
68 	u8 can_flip; /* tracks if the networking stack is using the page */
69 };
70 
71 /* A list of pages registered with the device during setup and used by a queue
72  * as buffers
73  */
74 struct gve_queue_page_list {
75 	u32 id; /* unique id */
76 	u32 num_entries;
77 	struct page **pages; /* list of num_entries pages */
78 	dma_addr_t *page_buses; /* the dma addrs of the pages */
79 };
80 
81 /* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
82 struct gve_rx_data_queue {
83 	union gve_rx_data_slot *data_ring; /* read by NIC */
84 	dma_addr_t data_bus; /* dma mapping of the slots */
85 	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
86 	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
87 	u8 raw_addressing; /* use raw_addressing? */
88 };
89 
90 struct gve_priv;
91 
92 /* RX buffer queue for posting buffers to HW.
93  * Each RX (completion) queue has a corresponding buffer queue.
94  */
95 struct gve_rx_buf_queue_dqo {
96 	struct gve_rx_desc_dqo *desc_ring;
97 	dma_addr_t bus;
98 	u32 head; /* Pointer to start cleaning buffers at. */
99 	u32 tail; /* Last posted buffer index + 1 */
100 	u32 mask; /* Mask for indices to the size of the ring */
101 };
102 
103 /* RX completion queue to receive packets from HW. */
104 struct gve_rx_compl_queue_dqo {
105 	struct gve_rx_compl_desc_dqo *desc_ring;
106 	dma_addr_t bus;
107 
108 	/* Number of slots which did not have a buffer posted yet. We should not
109 	 * post more buffers than the queue size to avoid HW overrunning the
110 	 * queue.
111 	 */
112 	int num_free_slots;
113 
114 	/* HW uses a "generation bit" to notify SW of new descriptors. When a
115 	 * descriptor's generation bit is different from the current generation,
116 	 * that descriptor is ready to be consumed by SW.
117 	 */
118 	u8 cur_gen_bit;
119 
120 	/* Pointer into desc_ring where the next completion descriptor will be
121 	 * received.
122 	 */
123 	u32 head;
124 	u32 mask; /* Mask for indices to the size of the ring */
125 };
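
/* Illustrative sketch (not part of the driver): consuming one completion
 * descriptor with the generation bit described above. The accessor for the
 * descriptor's generation bit is assumed for illustration; the real
 * descriptor layout lives in gve_desc_dqo.h.
 *
 *	desc = &complq->desc_ring[complq->head];
 *	if (<desc generation bit> != complq->cur_gen_bit) {
 *		// ... consume desc ...
 *		complq->head = (complq->head + 1) & complq->mask;
 *		if (complq->head == 0)
 *			complq->cur_gen_bit ^= 1;
 *	}
 */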
126 
127 /* Stores state for tracking buffers posted to HW */
128 struct gve_rx_buf_state_dqo {
129 	/* The page posted to HW. */
130 	struct gve_rx_slot_page_info page_info;
131 
132 	/* The DMA address corresponding to `page_info`. */
133 	dma_addr_t addr;
134 
135 	/* Last offset into the page when it only had a single reference, at
136 	 * which point every other offset is free to be reused.
137 	 */
138 	u32 last_single_ref_offset;
139 
140 	/* Linked list index to next element in the list, or -1 if none */
141 	s16 next;
142 };
143 
144 /* `head` and `tail` are indices into an array, or -1 if empty. */
145 struct gve_index_list {
146 	s16 head;
147 	s16 tail;
148 };
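
/* Illustrative sketch (not part of the driver): using a gve_index_list as a
 * FIFO over an array of buffer states, as the DQO RX path does with its
 * recycled/used buf_state lists below. No locking is shown and the helper
 * name is hypothetical.
 */
static inline void gve_example_enqueue_buf_state(struct gve_index_list *list,
						 struct gve_rx_buf_state_dqo *buf_states,
						 s16 index)
{
	buf_states[index].next = -1;
	if (list->head == -1)
		list->head = index;
	else
		buf_states[list->tail].next = index;
	list->tail = index;
}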
149 
150 /* A single received packet split across multiple buffers may be
151  * reconstructed using the information in this structure.
152  */
153 struct gve_rx_ctx {
154 	/* head and tail of skb chain for the current packet or NULL if none */
155 	struct sk_buff *skb_head;
156 	struct sk_buff *skb_tail;
157 	u32 total_size;
158 	u8 frag_cnt;
159 	bool drop_pkt;
160 };
161 
162 struct gve_rx_cnts {
163 	u32 ok_pkt_bytes;
164 	u16 ok_pkt_cnt;
165 	u16 total_pkt_cnt;
166 	u16 cont_pkt_cnt;
167 	u16 desc_err_pkt_cnt;
168 };
169 
170 /* Contains datapath state used to represent an RX queue. */
171 struct gve_rx_ring {
172 	struct gve_priv *gve;
173 	union {
174 		/* GQI fields */
175 		struct {
176 			struct gve_rx_desc_queue desc;
177 			struct gve_rx_data_queue data;
178 
179 			/* threshold for posting new buffs and descs */
180 			u32 db_threshold;
181 			u16 packet_buffer_size;
182 
183 			u32 qpl_copy_pool_mask;
184 			u32 qpl_copy_pool_head;
185 			struct gve_rx_slot_page_info *qpl_copy_pool;
186 		};
187 
188 		/* DQO fields. */
189 		struct {
190 			struct gve_rx_buf_queue_dqo bufq;
191 			struct gve_rx_compl_queue_dqo complq;
192 
193 			struct gve_rx_buf_state_dqo *buf_states;
194 			u16 num_buf_states;
195 
196 			/* Linked list of gve_rx_buf_state_dqo. Index into
197 			 * buf_states, or -1 if empty.
198 			 */
199 			s16 free_buf_states;
200 
201 			/* Linked list of gve_rx_buf_state_dqo. Indexes into
202 			 * buf_states, or -1 if empty.
203 			 *
204 			 * This list contains buf_states which are pointing to
205 			 * valid buffers.
206 			 *
207 			 * We use a FIFO here in order to increase the
208 			 * probability that buffers can be reused by increasing
209 			 * the time between usages.
210 			 */
211 			struct gve_index_list recycled_buf_states;
212 
213 			/* Linked list of gve_rx_buf_state_dqo. Indexes into
214 			 * buf_states, or -1 if empty.
215 			 *
216 			 * This list contains buf_states which have buffers
217 			 * which cannot be reused yet.
218 			 */
219 			struct gve_index_list used_buf_states;
220 		} dqo;
221 	};
222 
223 	u64 rbytes; /* free-running bytes received */
224 	u64 rpackets; /* free-running packets received */
225 	u32 cnt; /* free-running total number of completed packets */
226 	u32 fill_cnt; /* free-running total number of descs and buffs posted */
227 	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
228 	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
229 	u64 rx_copied_pkt; /* free-running total number of copied packets */
230 	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
231 	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
232 	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
233 	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
234 	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
235 	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
236 	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
237 	u64 xdp_tx_errors;
238 	u64 xdp_redirect_errors;
239 	u64 xdp_alloc_fails;
240 	u64 xdp_actions[GVE_XDP_ACTIONS];
241 	u32 q_num; /* queue index */
242 	u32 ntfy_id; /* notification block index */
243 	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
244 	dma_addr_t q_resources_bus; /* dma address for the queue resources */
245 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
246 
247 	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
248 
249 	/* XDP stuff */
250 	struct xdp_rxq_info xdp_rxq;
251 	struct xdp_rxq_info xsk_rxq;
252 	struct xsk_buff_pool *xsk_pool;
253 	struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
254 };
255 
256 /* A TX desc ring entry */
257 union gve_tx_desc {
258 	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
259 	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
260 	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
261 };
262 
263 /* Tracks the memory in the fifo occupied by a segment of a packet */
264 struct gve_tx_iovec {
265 	u32 iov_offset; /* offset into this segment */
266 	u32 iov_len; /* length */
267 	u32 iov_padding; /* padding associated with this segment */
268 };
269 
270 /* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
271  * ring entry but only used for a pkt_desc not a seg_desc
272  * ring entry but only used for a pkt_desc, not a seg_desc
273 struct gve_tx_buffer_state {
274 	union {
275 		struct sk_buff *skb; /* skb for this pkt */
276 		struct xdp_frame *xdp_frame; /* xdp_frame */
277 	};
278 	struct {
279 		u16 size; /* size of xmitted xdp pkt */
280 		u8 is_xsk; /* xsk buff */
281 	} xdp;
282 	union {
283 		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
284 		struct {
285 			DEFINE_DMA_UNMAP_ADDR(dma);
286 			DEFINE_DMA_UNMAP_LEN(len);
287 		};
288 	};
289 };
290 
291 /* A TX buffer - each queue has one */
292 struct gve_tx_fifo {
293 	void *base; /* address of base of FIFO */
294 	u32 size; /* total size */
295 	atomic_t available; /* how much space is still available */
296 	u32 head; /* offset to write at */
297 	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
298 };
299 
300 /* TX descriptor for DQO format */
301 union gve_tx_desc_dqo {
302 	struct gve_tx_pkt_desc_dqo pkt;
303 	struct gve_tx_tso_context_desc_dqo tso_ctx;
304 	struct gve_tx_general_context_desc_dqo general_ctx;
305 };
306 
307 enum gve_packet_state {
308 	/* Packet is in free list, available to be allocated.
309 	 * This should always be zero since state is not explicitly initialized.
310 	 */
311 	GVE_PACKET_STATE_UNALLOCATED,
312 	/* Packet is expecting a regular data completion or miss completion */
313 	GVE_PACKET_STATE_PENDING_DATA_COMPL,
314 	/* Packet has received a miss completion and is expecting a
315 	 * re-injection completion.
316 	 */
317 	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
318 	/* No valid completion received within the specified timeout. */
319 	GVE_PACKET_STATE_TIMED_OUT_COMPL,
320 };
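
/* Illustrative summary (not part of the driver): a pending packet normally
 * moves UNALLOCATED -> PENDING_DATA_COMPL -> freed. If a miss completion
 * arrives instead, it moves to PENDING_REINJECT_COMPL and is freed on the
 * re-injection completion, or ends up in TIMED_OUT_COMPL if that completion
 * never arrives in time.
 */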
321 
322 struct gve_tx_pending_packet_dqo {
323 	struct sk_buff *skb; /* skb for this packet */
324 
325 	/* 0th element corresponds to the linear portion of `skb`, should be
326 	 * unmapped with `dma_unmap_single`.
327 	 *
328 	 * All others correspond to `skb`'s frags and should be unmapped with
329 	 * `dma_unmap_page`.
330 	 */
331 	DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
332 	DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
333 	u16 num_bufs;
334 
335 	/* Linked list index to next element in the list, or -1 if none */
336 	s16 next;
337 
338 	/* Linked list index to prev element in the list, or -1 if none.
339 	 * Used for tracking either outstanding miss completions or prematurely
340 	 * freed packets.
341 	 */
342 	s16 prev;
343 
344 	/* Identifies the current state of the packet as defined in
345 	 * `enum gve_packet_state`.
346 	 */
347 	u8 state;
348 
349 	/* If packet is an outstanding miss completion, then the packet is
350 	 * freed if the corresponding re-injection completion is not received
351 	 * before kernel jiffies exceeds timeout_jiffies.
352 	 */
353 	unsigned long timeout_jiffies;
354 };
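
/* Illustrative sketch (not part of the driver): unmapping the buffers of a
 * pending packet per the rule above -- dma_unmap_single() for the linear
 * portion (index 0), dma_unmap_page() for the frags. The helper name is
 * hypothetical.
 */
static inline void gve_example_unmap_pending_packet(struct device *dev,
						    struct gve_tx_pending_packet_dqo *pkt)
{
	int i;

	for (i = 0; i < pkt->num_bufs; i++) {
		if (i == 0)
			dma_unmap_single(dev, dma_unmap_addr(pkt, dma[i]),
					 dma_unmap_len(pkt, len[i]),
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
				       dma_unmap_len(pkt, len[i]),
				       DMA_TO_DEVICE);
	}
}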
355 
356 /* Contains datapath state used to represent a TX queue. */
357 struct gve_tx_ring {
358 	/* Cacheline 0 -- Accessed & dirtied during transmit */
359 	union {
360 		/* GQI fields */
361 		struct {
362 			struct gve_tx_fifo tx_fifo;
363 			u32 req; /* driver tracked head pointer */
364 			u32 done; /* driver tracked tail pointer */
365 		};
366 
367 		/* DQO fields. */
368 		struct {
369 			/* Linked list of gve_tx_pending_packet_dqo. Index into
370 			 * pending_packets, or -1 if empty.
371 			 *
372 			 * This is a consumer list owned by the TX path. When it
373 			 * runs out, the producer list is stolen from the
374 			 * completion handling path
375 			 * (dqo_compl.free_pending_packets).
376 			 */
377 			s16 free_pending_packets;
378 
379 			/* Cached value of `dqo_compl.hw_tx_head` */
380 			u32 head;
381 			u32 tail; /* Last posted buffer index + 1 */
382 
383 			/* Index of the last descriptor with "report event" bit
384 			 * set.
385 			 */
386 			u32 last_re_idx;
387 		} dqo_tx;
388 	};
389 
390 	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
391 	union {
392 		/* GQI fields */
393 		struct {
394 			/* Spinlock for when cleanup in progress */
395 			spinlock_t clean_lock;
396 			/* Spinlock for XDP tx traffic */
397 			spinlock_t xdp_lock;
398 		};
399 
400 		/* DQO fields. */
401 		struct {
402 			u32 head; /* Last read on compl_desc */
403 
404 			/* Tracks the current gen bit of compl_q */
405 			u8 cur_gen_bit;
406 
407 			/* Linked list of gve_tx_pending_packet_dqo. Index into
408 			 * pending_packets, or -1 if empty.
409 			 *
410 			 * This is the producer list, owned by the completion
411 			 * handling path. When the consumer list
412 			 * (dqo_tx.free_pending_packets) is runs out, this list
413 			 * will be stolen.
414 			 */
415 			atomic_t free_pending_packets;
416 
417 			/* Last TX ring index fetched by HW */
418 			atomic_t hw_tx_head;
419 
420 			/* List to track pending packets which received a miss
421 			 * completion but not a corresponding reinjection.
422 			 */
423 			struct gve_index_list miss_completions;
424 
425 			/* List to track pending packets that were completed
426 			 * before receiving a valid completion because they
427 			 * reached a specified timeout.
428 			 */
429 			struct gve_index_list timed_out_completions;
430 		} dqo_compl;
431 	} ____cacheline_aligned;
432 	u64 pkt_done; /* free-running - total packets completed */
433 	u64 bytes_done; /* free-running - total bytes completed */
434 	u64 dropped_pkt; /* free-running - total packets dropped */
435 	u64 dma_mapping_error; /* count of dma mapping errors */
436 
437 	/* Cacheline 2 -- Read-mostly fields */
438 	union {
439 		/* GQI fields */
440 		struct {
441 			union gve_tx_desc *desc;
442 
443 			/* Maps 1:1 to a desc */
444 			struct gve_tx_buffer_state *info;
445 		};
446 
447 		/* DQO fields. */
448 		struct {
449 			union gve_tx_desc_dqo *tx_ring;
450 			struct gve_tx_compl_desc *compl_ring;
451 
452 			struct gve_tx_pending_packet_dqo *pending_packets;
453 			s16 num_pending_packets;
454 
455 			u32 complq_mask; /* complq size is complq_mask + 1 */
456 		} dqo;
457 	} ____cacheline_aligned;
458 	struct netdev_queue *netdev_txq;
459 	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
460 	struct device *dev;
461 	u32 mask; /* masks req and done down to queue size */
462 	u8 raw_addressing; /* use raw_addressing? */
463 
464 	/* Slow-path fields */
465 	u32 q_num ____cacheline_aligned; /* queue idx */
466 	u32 stop_queue; /* count of queue stops */
467 	u32 wake_queue; /* count of queue wakes */
468 	u32 queue_timeout; /* count of queue timeouts */
469 	u32 ntfy_id; /* notification block index */
470 	u32 last_kick_msec; /* Last time the queue was kicked */
471 	dma_addr_t bus; /* dma address of the descr ring */
472 	dma_addr_t q_resources_bus; /* dma address of the queue resources */
473 	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
474 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
475 	struct xsk_buff_pool *xsk_pool;
476 	u32 xdp_xsk_wakeup;
477 	u32 xdp_xsk_done;
478 	u64 xdp_xsk_sent;
479 	u64 xdp_xmit;
480 	u64 xdp_xmit_errors;
481 } ____cacheline_aligned;
482 
483 /* Wraps the info for one irq including the napi struct and the queues
484  * associated with that irq.
485  */
486 struct gve_notify_block {
487 	__be32 *irq_db_index; /* pointer to idx into Bar2 */
488 	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
489 	struct napi_struct napi; /* kernel napi struct for this block */
490 	struct gve_priv *priv;
491 	struct gve_tx_ring *tx; /* tx rings on this block */
492 	struct gve_rx_ring *rx; /* rx rings on this block */
493 };
494 
495 /* Tracks allowed and current queue settings */
496 struct gve_queue_config {
497 	u16 max_queues;
498 	u16 num_queues; /* current */
499 };
500 
501 /* Tracks the available and used qpl IDs */
502 struct gve_qpl_config {
503 	u32 qpl_map_size; /* map memory size */
504 	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
505 };
506 
507 struct gve_options_dqo_rda {
508 	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
509 	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
510 };
511 
512 struct gve_irq_db {
513 	__be32 index;
514 } ____cacheline_aligned;
515 
516 struct gve_ptype {
517 	u8 l3_type;  /* `gve_l3_type` in gve_adminq.h */
518 	u8 l4_type;  /* `gve_l4_type` in gve_adminq.h */
519 };
520 
521 struct gve_ptype_lut {
522 	struct gve_ptype ptypes[GVE_NUM_PTYPES];
523 };
524 
525 /* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
526  * when the entire configure_device_resources command is zeroed out and the
527  * queue_format is not specified.
528  */
529 enum gve_queue_format {
530 	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
531 	GVE_GQI_RDA_FORMAT		= 0x1,
532 	GVE_GQI_QPL_FORMAT		= 0x2,
533 	GVE_DQO_RDA_FORMAT		= 0x3,
534 };
535 
536 struct gve_priv {
537 	struct net_device *dev;
538 	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
539 	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
540 	struct gve_queue_page_list *qpls; /* array of num qpls */
541 	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
542 	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
543 	dma_addr_t irq_db_indices_bus;
544 	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
545 	char mgmt_msix_name[IFNAMSIZ + 16];
546 	u32 mgmt_msix_idx;
547 	__be32 *counter_array; /* array of num_event_counters */
548 	dma_addr_t counter_array_bus;
549 
550 	u16 num_event_counters;
551 	u16 tx_desc_cnt; /* num desc per ring */
552 	u16 rx_desc_cnt; /* num desc per ring */
553 	u16 tx_pages_per_qpl; /* number of pages per tx qpl */
554 	u16 rx_data_slot_cnt; /* number of rx data slots per ring */
555 	u64 max_registered_pages;
556 	u64 num_registered_pages; /* num pages registered with NIC */
557 	struct bpf_prog *xdp_prog; /* XDP BPF program */
558 	u32 rx_copybreak; /* copy packets smaller than this */
559 	u16 default_num_queues; /* default num queues to set up */
560 
561 	u16 num_xdp_queues;
562 	struct gve_queue_config tx_cfg;
563 	struct gve_queue_config rx_cfg;
564 	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
565 	u32 num_ntfy_blks; /* split between TX and RX so must be even */
566 
567 	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
568 	__be32 __iomem *db_bar2; /* "array" of doorbells */
569 	u32 msg_enable;	/* level for netif* netdev print macros	*/
570 	struct pci_dev *pdev;
571 
572 	/* metrics */
573 	u32 tx_timeo_cnt;
574 
575 	/* Admin queue - see gve_adminq.h */
576 	union gve_adminq_command *adminq;
577 	dma_addr_t adminq_bus_addr;
578 	u32 adminq_mask; /* masks prod_cnt to adminq size */
579 	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
580 	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
581 	u32 adminq_timeouts; /* free-running count of AQ cmd timeouts */
582 	/* free-running count of each AQ cmd executed */
583 	u32 adminq_describe_device_cnt;
584 	u32 adminq_cfg_device_resources_cnt;
585 	u32 adminq_register_page_list_cnt;
586 	u32 adminq_unregister_page_list_cnt;
587 	u32 adminq_create_tx_queue_cnt;
588 	u32 adminq_create_rx_queue_cnt;
589 	u32 adminq_destroy_tx_queue_cnt;
590 	u32 adminq_destroy_rx_queue_cnt;
591 	u32 adminq_dcfg_device_resources_cnt;
592 	u32 adminq_set_driver_parameter_cnt;
593 	u32 adminq_report_stats_cnt;
594 	u32 adminq_report_link_speed_cnt;
595 	u32 adminq_get_ptype_map_cnt;
596 	u32 adminq_verify_driver_compatibility_cnt;
597 
598 	/* Global stats */
599 	u32 interface_up_cnt; /* count of times interface turned up since last reset */
600 	u32 interface_down_cnt; /* count of times interface turned down since last reset */
601 	u32 reset_cnt; /* count of resets */
602 	u32 page_alloc_fail; /* count of page alloc fails */
603 	u32 dma_mapping_error; /* count of dma mapping errors */
604 	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
605 	u32 suspend_cnt; /* count of times suspended */
606 	u32 resume_cnt; /* count of times resumed */
607 	struct workqueue_struct *gve_wq;
608 	struct work_struct service_task;
609 	struct work_struct stats_report_task;
610 	unsigned long service_task_flags;
611 	unsigned long state_flags;
612 
613 	struct gve_stats_report *stats_report;
614 	u64 stats_report_len;
615 	dma_addr_t stats_report_bus; /* dma address for the stats report */
616 	unsigned long ethtool_flags;
617 
618 	unsigned long stats_report_timer_period;
619 	struct timer_list stats_report_timer;
620 
621 	/* Gvnic device link speed from hypervisor. */
622 	u64 link_speed;
623 	bool up_before_suspend; /* True if dev was up before suspend */
624 
625 	struct gve_options_dqo_rda options_dqo_rda;
626 	struct gve_ptype_lut *ptype_lut_dqo;
627 
628 	/* Must be a power of two. */
629 	int data_buffer_size_dqo;
630 
631 	enum gve_queue_format queue_format;
632 
633 	/* Interrupt coalescing settings */
634 	u32 tx_coalesce_usecs;
635 	u32 rx_coalesce_usecs;
636 };
637 
638 enum gve_service_task_flags_bit {
639 	GVE_PRIV_FLAGS_DO_RESET			= 1,
640 	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
641 	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
642 	GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
643 };
644 
645 enum gve_state_flags_bit {
646 	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
647 	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
648 	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
649 	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
650 };
651 
652 enum gve_ethtool_flags_bit {
653 	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
654 };
655 
656 static inline bool gve_get_do_reset(struct gve_priv *priv)
657 {
658 	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
659 }
660 
661 static inline void gve_set_do_reset(struct gve_priv *priv)
662 {
663 	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
664 }
665 
666 static inline void gve_clear_do_reset(struct gve_priv *priv)
667 {
668 	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
669 }
670 
671 static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
672 {
673 	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
674 			&priv->service_task_flags);
675 }
676 
677 static inline void gve_set_reset_in_progress(struct gve_priv *priv)
678 {
679 	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
680 }
681 
682 static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
683 {
684 	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
685 }
686 
687 static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
688 {
689 	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
690 			&priv->service_task_flags);
691 }
692 
693 static inline void gve_set_probe_in_progress(struct gve_priv *priv)
694 {
695 	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
696 }
697 
698 static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
699 {
700 	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
701 }
702 
703 static inline bool gve_get_do_report_stats(struct gve_priv *priv)
704 {
705 	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
706 			&priv->service_task_flags);
707 }
708 
709 static inline void gve_set_do_report_stats(struct gve_priv *priv)
710 {
711 	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
712 }
713 
714 static inline void gve_clear_do_report_stats(struct gve_priv *priv)
715 {
716 	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
717 }
718 
719 static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
720 {
721 	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
722 }
723 
724 static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
725 {
726 	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
727 }
728 
729 static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
730 {
731 	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
732 }
733 
734 static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
735 {
736 	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
737 }
738 
739 static inline void gve_set_device_resources_ok(struct gve_priv *priv)
740 {
741 	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
742 }
743 
744 static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
745 {
746 	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
747 }
748 
749 static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
750 {
751 	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
752 }
753 
754 static inline void gve_set_device_rings_ok(struct gve_priv *priv)
755 {
756 	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
757 }
758 
759 static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
760 {
761 	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
762 }
763 
764 static inline bool gve_get_napi_enabled(struct gve_priv *priv)
765 {
766 	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
767 }
768 
769 static inline void gve_set_napi_enabled(struct gve_priv *priv)
770 {
771 	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
772 }
773 
774 static inline void gve_clear_napi_enabled(struct gve_priv *priv)
775 {
776 	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
777 }
778 
779 static inline bool gve_get_report_stats(struct gve_priv *priv)
780 {
781 	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
782 }
783 
784 static inline void gve_clear_report_stats(struct gve_priv *priv)
785 {
786 	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
787 }
788 
789 /* Returns the address of the given ntfy_block's irq doorbell
790  */
791 static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
792 					       struct gve_notify_block *block)
793 {
794 	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
795 }
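
/* Illustrative note (not part of the driver): the doorbell address returned
 * above is typically written with a big-endian store, e.g.
 *
 *	iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, gve_irq_doorbell(priv, block));
 *
 * to ack and re-arm the block's interrupt. The GVE_IRQ_* values are assumed
 * here for illustration; they are defined elsewhere in the driver.
 */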
796 
797 /* Returns the index into ntfy_blocks of the given tx ring's block
798  */
799 static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
800 {
801 	return queue_idx;
802 }
803 
804 /* Returns the index into ntfy_blocks of the given rx ring's block
805  */
806 static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
807 {
808 	return (priv->num_ntfy_blks / 2) + queue_idx;
809 }
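
/* For example (illustration only): with num_ntfy_blks == 16, TX queue 3 is
 * served by notify block 3 via gve_tx_idx_to_ntfy(), while RX queue 3 is
 * served by notify block (16 / 2) + 3 = 11 via gve_rx_idx_to_ntfy().
 */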
810 
811 /* Returns the number of tx queue page lists
812  */
813 static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
814 {
815 	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
816 		return 0;
817 
818 	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
819 }
820 
821 /* Returns the number of XDP tx queue page lists
822  */
823 static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
824 {
825 	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
826 		return 0;
827 
828 	return priv->num_xdp_queues;
829 }
830 
831 /* Returns the number of rx queue page lists
832  */
833 static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
834 {
835 	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
836 		return 0;
837 
838 	return priv->rx_cfg.num_queues;
839 }
840 
841 static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
842 {
843 	return tx_qid;
844 }
845 
846 static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
847 {
848 	return priv->tx_cfg.max_queues + rx_qid;
849 }
850 
851 static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
852 {
853 	return gve_tx_qpl_id(priv, 0);
854 }
855 
856 static inline u32 gve_rx_start_qpl_id(struct gve_priv *priv)
857 {
858 	return gve_rx_qpl_id(priv, 0);
859 }
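
/* For example (illustration only): with tx_cfg.max_queues == 16, TX queue 2
 * uses QPL id 2 while RX queue 2 uses QPL id 16 + 2 = 18; RX QPL ids always
 * begin at tx_cfg.max_queues.
 */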
860 
861 /* Returns a pointer to the next available tx qpl in the list of qpls
862  */
863 static inline
864 struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv, int tx_qid)
865 {
866 	int id = gve_tx_qpl_id(priv, tx_qid);
867 
868 	/* QPL already in use */
869 	if (test_bit(id, priv->qpl_cfg.qpl_id_map))
870 		return NULL;
871 
872 	set_bit(id, priv->qpl_cfg.qpl_id_map);
873 	return &priv->qpls[id];
874 }
875 
876 /* Returns a pointer to the next available rx qpl in the list of qpls
877  */
878 static inline
879 struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv, int rx_qid)
880 {
881 	int id = gve_rx_qpl_id(priv, rx_qid);
882 
883 	/* QPL already in use */
884 	if (test_bit(id, priv->qpl_cfg.qpl_id_map))
885 		return NULL;
886 
887 	set_bit(id, priv->qpl_cfg.qpl_id_map);
888 	return &priv->qpls[id];
889 }
890 
891 /* Unassigns the qpl with the given id
892  */
893 static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
894 {
895 	clear_bit(id, priv->qpl_cfg.qpl_id_map);
896 }
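
/* Illustrative sketch (not part of the driver): typical use of the QPL
 * assignment helpers above when bringing up an RX queue. The helper name and
 * error handling are hypothetical.
 */
static inline int gve_example_setup_rx_qpl(struct gve_priv *priv, int rx_qid)
{
	struct gve_queue_page_list *qpl = gve_assign_rx_qpl(priv, rx_qid);

	if (!qpl)
		return -EBUSY; /* id already marked used in qpl_id_map */

	/* ... register pages and fill the data ring here ... */
	return 0;
}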
897 
898 /* Returns the correct dma direction for tx and rx qpls
899  */
900 static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
901 						      int id)
902 {
903 	if (id < gve_rx_start_qpl_id(priv))
904 		return DMA_TO_DEVICE;
905 	else
906 		return DMA_FROM_DEVICE;
907 }
908 
909 static inline bool gve_is_gqi(struct gve_priv *priv)
910 {
911 	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
912 		priv->queue_format == GVE_GQI_QPL_FORMAT;
913 }
914 
915 static inline u32 gve_num_tx_queues(struct gve_priv *priv)
916 {
917 	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
918 }
919 
920 static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
921 {
922 	return priv->tx_cfg.num_queues + queue_id;
923 }
924 
925 static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
926 {
927 	return gve_xdp_tx_queue_id(priv, 0);
928 }
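
/* For example (illustration only): with tx_cfg.num_queues == 8 and
 * num_xdp_queues == 4, XDP queue 2 transmits on TX ring 8 + 2 = 10 and
 * gve_num_tx_queues() reports 12 rings in total.
 */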
929 
930 /* buffers */
931 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
932 		   struct page **page, dma_addr_t *dma,
933 		   enum dma_data_direction, gfp_t gfp_flags);
934 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
935 		   enum dma_data_direction);
936 /* tx handling */
937 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
938 int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
939 		 u32 flags);
940 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
941 		     void *data, int len, void *frame_p);
942 void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
943 bool gve_tx_poll(struct gve_notify_block *block, int budget);
944 bool gve_xdp_poll(struct gve_notify_block *block, int budget);
945 int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings);
946 void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings);
947 u32 gve_tx_load_event_counter(struct gve_priv *priv,
948 			      struct gve_tx_ring *tx);
949 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
950 /* rx handling */
951 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
952 int gve_rx_poll(struct gve_notify_block *block, int budget);
953 bool gve_rx_work_pending(struct gve_rx_ring *rx);
954 int gve_rx_alloc_rings(struct gve_priv *priv);
955 void gve_rx_free_rings_gqi(struct gve_priv *priv);
956 /* Reset */
957 void gve_schedule_reset(struct gve_priv *priv);
958 int gve_reset(struct gve_priv *priv, bool attempt_teardown);
959 int gve_adjust_queues(struct gve_priv *priv,
960 		      struct gve_queue_config new_rx_config,
961 		      struct gve_queue_config new_tx_config);
962 /* report stats handling */
963 void gve_handle_report_stats(struct gve_priv *priv);
964 /* exported by ethtool.c */
965 extern const struct ethtool_ops gve_ethtool_ops;
966 /* needed by ethtool */
967 extern const char gve_version_str[];
968 #endif /* _GVE_H_ */
969