Lines Matching +full:queue +full:- +full:pkt +full:- +full:tx

1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
4 * Copyright (C) 2015-2021 Google, Inc.
10 #include <linux/dma-mapping.h>
30 /* 1 for management, 1 for rx, 1 for tx */
33 /* Numbers of gve tx/rx stats in stats report. */
40 /* Numbers of NIC tx/rx stats in stats report. */
44 #define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
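GVE_DATA_SLOT_ADDR_PAGE_MASK clears the low bits of a data-slot address so it points at the start of its page. A minimal user-space sketch of how the mask behaves (the 4096-byte PAGE_SIZE here is an assumption for illustration; the kernel value is architecture-dependent):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed page size for this sketch */
#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t addr = 0x12345678;
	/* Masking off the low bits yields the page-aligned base address. */
	uint64_t base = addr & GVE_DATA_SLOT_ADDR_PAGE_MASK;
	uint64_t offset = addr & (PAGE_SIZE - 1);

	printf("base=0x%llx offset=0x%llx\n",
	       (unsigned long long)base, (unsigned long long)offset);
	return 0;
}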
63 /* 2K buffers for DQO-QPL */
69 * allocs and uses a non-qpl page on the receive path of DQO QPL to free
82 /* The page info for a single slot in the RX data queue */
92 /* A list of pages registered with the device during setup and used by a queue
107 struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
113 /* RX buffer queue for posting buffers to HW.
114 * Each RX (completion) queue has a corresponding buffer queue.
124 /* RX completion queue to receive packets from HW. */
130 * post more buffers than the queue size to avoid HW overrunning the
131 * queue.
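The split RX design described above pairs a buffer queue the driver fills with a completion queue the hardware fills, and the driver must not post more buffers than the ring holds. A hedged sketch of that accounting with free-running counters (the struct and field names are illustrative, not the driver's actual ones):

#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE 1024u	/* illustrative buffer queue size */

/* Free-running producer/consumer counters for one RX buffer queue. */
struct rx_buf_accounting {
	uint32_t posted;	/* buffers handed to the NIC */
	uint32_t completed;	/* buffers the NIC has consumed */
};

/* Post only while the outstanding count is below the ring size, so the
 * hardware can never overrun buffers it has not yet completed.
 */
static bool can_post_buffer(const struct rx_buf_accounting *a)
{
	return (uint32_t)(a->posted - a->completed) < RING_SIZE;
}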
161 /* Linked list index to next element in the list, or -1 if none */
165 /* `head` and `tail` are indices into an array, or -1 if empty. */
191 /* Contains datapath state used to represent an RX queue. */
218 * buf_states, or -1 if empty.
223 * buf_states, or -1 if empty.
235 * buf_states, or -1 if empty.
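Several of the lists noted above are index-based: the elements live in a flat buf_states array and chain to each other by array index, with -1 as the empty/none sentinel. A small self-contained sketch of walking such a list (the struct and field names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct buf_state {
	int16_t next;	/* index of the next element, or -1 if none */
	int data;
};

struct index_list {
	int16_t head;	/* -1 when the list is empty */
	int16_t tail;
};

/* Walk an index-linked list whose elements live in a flat array. */
static void walk(const struct index_list *list, const struct buf_state *states)
{
	for (int16_t i = list->head; i != -1; i = states[i].next)
		printf("buf_state[%d] data=%d\n", i, states[i].data);
}

int main(void)
{
	struct buf_state states[3] = {
		[0] = { .next = 2, .data = 10 },
		[2] = { .next = -1, .data = 30 },
	};
	struct index_list list = { .head = 0, .tail = 2 };

	walk(&list, states);
	return 0;
}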
242 /* qpl assigned to this queue */
245 /* index into queue page list */
253 u64 rbytes; /* free-running bytes received */
254 u64 rpackets; /* free-running packets received */
255 u32 cnt; /* free-running total number of completed packets */
256 u32 fill_cnt; /* free-running total number of descs and buffs posted */
258 u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
259 u64 rx_copied_pkt; /* free-running total number of copied packets */
260 u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
261 u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
262 u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
263 u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
264 u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
265 u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
266 u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
271 u32 q_num; /* queue index */
274 dma_addr_t q_resources_bus; /* dma address for the queue resources */
286 /* A TX desc ring entry */
288 struct gve_tx_pkt_desc pkt; /* first desc for a packet */
305 struct sk_buff *skb; /* skb for this pkt */
309 u16 size; /* size of xmitted xdp pkt */
313 struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
321 /* A TX buffer - each queue has one */
330 /* TX descriptor for DQO format */
332 struct gve_tx_pkt_desc_dqo pkt;
345 * re-injection completion.
371 /* Linked list index to next element in the list, or -1 if none */
374 /* Linked list index to prev element in the list, or -1 if none.
386 * freed if the corresponding re-injection completion is not received
392 /* Contains datapath state used to represent a TX queue. */
394 /* Cacheline 0 -- Accessed & dirtied during transmit */
406 * pending_packets, or -1 if empty.
408 * This is a consumer list owned by the TX path. When it
432 * tx_qpl_buf_next, or -1 if empty.
434 * This is a consumer list owned by the TX path. When it
441 /* Free running count of the number of QPL tx buffers
452 /* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
458 /* Spinlock for XDP tx traffic */
470 * pending_packets, or -1 if empty.
479 /* Last TX ring index fetched by HW */
496 * tx_qpl_buf_next, or -1 if empty.
505 /* Free running count of the number of tx buffers
512 u64 pkt_done; /* free-running - total packets completed */
513 u64 bytes_done; /* free-running - total bytes completed */
514 u64 dropped_pkt; /* free-running - total packets dropped */
517 /* Cacheline 2 -- Read-mostly fields */
539 /* qpl assigned to this queue */
542 /* Each QPL page is divided into TX bounce buffers
544 * an array to manage linked lists of TX buffers.
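With the 2K DQO-QPL bounce buffers noted near the top of the file, each QPL page holds PAGE_SIZE divided by the buffer size worth of TX bounce buffers, and the array mentioned above then links the free ones by index. A quick sketch of the per-page arithmetic (the 4096-byte page and 2048-byte buffer size are assumptions for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096u			/* assumed page size */
#define TX_BOUNCE_BUF_SIZE 2048u	/* the "2K buffers for DQO-QPL" */

int main(void)
{
	unsigned int bufs_per_page = PAGE_SIZE / TX_BOUNCE_BUF_SIZE;	/* 2 */
	unsigned int qpl_pages = 512;	/* illustrative qpl length */

	printf("%u bounce buffers per page, %u per qpl\n",
	       bufs_per_page, bufs_per_page * qpl_pages);
	return 0;
}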
556 u32 mask; /* masks req and done down to queue size */
559 /* Slow-path fields */
560 u32 q_num ____cacheline_aligned; /* queue idx */
561 u32 stop_queue; /* count of queue stops */
562 u32 wake_queue; /* count of queue wakes */
563 u32 queue_timeout; /* count of queue timeouts */
565 u32 last_kick_msec; /* Last time the queue was kicked */
567 dma_addr_t q_resources_bus; /* dma address of the queue resources */
586 struct gve_tx_ring *tx; /* tx rings on this block */
590 /* Tracks allowed and current queue settings */
634 struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
649 u16 tx_pages_per_qpl; /* Number of pages per TX qpl suggested by the NIC */
662 u32 num_ntfy_blks; /* split between TX and RX so must be even */
672 /* Admin queue - see gve_adminq.h */
676 u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
677 u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
678 u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
679 /* free-running count of each AQ cmd executed */
701 u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
755 return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags); /* in gve_get_do_reset() */
760 set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags); /* in gve_set_do_reset() */
765 clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags); /* in gve_clear_do_reset() */
771 &priv->service_task_flags); /* in gve_get_reset_in_progress() */
776 set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags); /* in gve_set_reset_in_progress() */
781 clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags); /* in gve_clear_reset_in_progress() */
787 &priv->service_task_flags); /* in gve_get_probe_in_progress() */
792 set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags); /* in gve_set_probe_in_progress() */
797 clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags); /* in gve_clear_probe_in_progress() */
803 &priv->service_task_flags); /* in gve_get_do_report_stats() */
808 set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags); /* in gve_set_do_report_stats() */
813 clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags); /* in gve_clear_do_report_stats() */
818 return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags); /* in gve_get_admin_queue_ok() */
823 set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags); /* in gve_set_admin_queue_ok() */
828 clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags); /* in gve_clear_admin_queue_ok() */
833 return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags); /* in gve_get_device_resources_ok() */
838 set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags); /* in gve_set_device_resources_ok() */
843 clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags); /* in gve_clear_device_resources_ok() */
848 return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags); /* in gve_get_device_rings_ok() */
853 set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags); /* in gve_set_device_rings_ok() */
858 clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags); /* in gve_clear_device_rings_ok() */
863 return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags); /* in gve_get_napi_enabled() */
868 set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags); /* in gve_set_napi_enabled() */
873 clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags); /* in gve_clear_napi_enabled() */
878 return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags); /* in gve_get_report_stats() */
883 clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags); /* in gve_clear_report_stats() */
891 return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)]; /* in gve_irq_doorbell() */
894 /* Returns the index into ntfy_blocks of the given tx ring's block
905 return (priv->num_ntfy_blks / 2) + queue_idx; /* in gve_rx_idx_to_ntfy() */
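The return statement above implies the notification-block layout: the first half of ntfy_blocks serves TX queues and the second half serves RX queues, which is also why num_ntfy_blks is split between TX and RX and must be even (see the num_ntfy_blks comment earlier). A hedged sketch of both index calculations; the TX variant is inferred from that layout rather than copied from the file:

/* Sketch, assuming TX blocks occupy [0, num_ntfy_blks / 2) and RX blocks
 * occupy [num_ntfy_blks / 2, num_ntfy_blks).
 */
static inline unsigned int tx_idx_to_ntfy_sketch(unsigned int num_ntfy_blks,
						 unsigned int queue_idx)
{
	return queue_idx;	/* TX queues map onto the first half */
}

static inline unsigned int rx_idx_to_ntfy_sketch(unsigned int num_ntfy_blks,
						 unsigned int queue_idx)
{
	return (num_ntfy_blks / 2) + queue_idx;	/* RX queues map onto the second half */
}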
910 return priv->queue_format == GVE_GQI_QPL_FORMAT ||
911 priv->queue_format == GVE_DQO_QPL_FORMAT; /* in gve_is_qpl() */
914 /* Returns the number of tx queue page lists
921 return priv->tx_cfg.num_queues + priv->num_xdp_queues; /* in gve_num_tx_qpls() */
924 /* Returns the number of XDP tx queue page lists
928 if (priv->queue_format != GVE_GQI_QPL_FORMAT)
931 return priv->num_xdp_queues; /* in gve_num_xdp_qpls() */
934 /* Returns the number of rx queue page lists
941 return priv->rx_cfg.num_queues; /* in gve_num_rx_qpls() */
951 return priv->tx_cfg.max_queues + rx_qid; /* in gve_rx_qpl_id() */
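The offset above suggests QPL ids are numbered with the TX qpls first, starting at 0, and the RX qpls immediately after them, which is why an RX qpl id is the RX queue id plus the maximum number of TX queues. A small sketch of that numbering (the helper names are illustrative):

/* TX qpl ids occupy [0, max_tx_queues); RX qpl ids follow them. */
static inline unsigned int tx_qpl_id_sketch(unsigned int tx_qid)
{
	return tx_qid;
}

static inline unsigned int rx_qpl_id_sketch(unsigned int max_tx_queues,
					    unsigned int rx_qid)
{
	return max_tx_queues + rx_qid;
}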
964 /* Returns a pointer to the next available tx qpl in the list of qpls
972 if (test_bit(id, priv->qpl_cfg.qpl_id_map))
975 set_bit(id, priv->qpl_cfg.qpl_id_map);
976 return &priv->qpls[id]; /* in gve_assign_tx_qpl() */
987 if (test_bit(id, priv->qpl_cfg.qpl_id_map))
990 set_bit(id, priv->qpl_cfg.qpl_id_map);
991 return &priv->qpls[id]; /* in gve_assign_rx_qpl() */
998 clear_bit(id, priv->qpl_cfg.qpl_id_map); /* in gve_unassign_qpl() */
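The assign/unassign fragments above track qpl ownership in a bitmap: test_bit() refuses an id that is already taken, set_bit() claims it, and clear_bit() releases it. A minimal user-space analogue with plain C bit operations (the kernel helpers operate on priv->qpl_cfg.qpl_id_map, and unlike this sketch their callers handle any needed serialization):

#include <stdbool.h>
#include <stdint.h>

#define MAX_QPLS 64u

static uint64_t qpl_id_map;	/* one bit per qpl id, set when assigned */

/* Claim a qpl id if it is free; returns false if it is already assigned. */
static bool assign_qpl(unsigned int id)
{
	if (id >= MAX_QPLS || (qpl_id_map & (1ULL << id)))
		return false;
	qpl_id_map |= 1ULL << id;
	return true;
}

/* Release a previously assigned qpl id. */
static void unassign_qpl(unsigned int id)
{
	if (id < MAX_QPLS)
		qpl_id_map &= ~(1ULL << id);
}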
1001 /* Returns the correct dma direction for tx and rx qpls
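The comment above belongs to a helper that maps a qpl to a DMA direction: TX qpls hold data the device reads, RX qpls receive data the device writes. A hedged sketch using the kernel's enum dma_data_direction (already pulled in by the linux/dma-mapping.h include near the top); the exact id cutoff is inferred from the qpl numbering shown earlier, not copied from the file:

/* Sketch: ids below the number of TX qpls belong to TX rings, the rest to RX. */
static inline enum dma_data_direction qpl_dma_dir_sketch(unsigned int id,
							 unsigned int num_tx_qpls)
{
	return id < num_tx_qpls ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}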
1014 return priv->queue_format == GVE_GQI_RDA_FORMAT ||
1015 priv->queue_format == GVE_GQI_QPL_FORMAT; /* in gve_is_gqi() */
1020 return priv->tx_cfg.num_queues + priv->num_xdp_queues; /* in gve_num_tx_queues() */
1025 return priv->tx_cfg.num_queues + queue_id; /* in gve_xdp_tx_queue_id() */
1035 switch (priv->queue_format) { /* in gve_supports_xdp_xmit() */
1049 /* tx handling */
1053 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
1061 struct gve_tx_ring *tx);
1062 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);