1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
2  * Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2021 Google, Inc.
5  */
6 
7 #ifndef _GVE_ADMINQ_H
8 #define _GVE_ADMINQ_H
9 
10 #include <linux/build_bug.h>
11 
12 /* Admin queue opcodes */
/* Admin queue opcodes
 *
 * Command identifiers the driver places in the opcode field of an admin
 * queue command.  Note the gap in the numbering: 0xA is skipped
 * (reserved/unused in this version of the interface).
 */
enum gve_adminq_opcodes {
	GVE_ADMINQ_DESCRIBE_DEVICE		= 0x1,
	GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES	= 0x2,
	GVE_ADMINQ_REGISTER_PAGE_LIST		= 0x3,
	GVE_ADMINQ_UNREGISTER_PAGE_LIST		= 0x4,
	GVE_ADMINQ_CREATE_TX_QUEUE		= 0x5,
	GVE_ADMINQ_CREATE_RX_QUEUE		= 0x6,
	GVE_ADMINQ_DESTROY_TX_QUEUE		= 0x7,
	GVE_ADMINQ_DESTROY_RX_QUEUE		= 0x8,
	GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES	= 0x9,
	/* 0xA is not used */
	GVE_ADMINQ_SET_DRIVER_PARAMETER		= 0xB,
	GVE_ADMINQ_REPORT_STATS			= 0xC,
	GVE_ADMINQ_REPORT_LINK_SPEED		= 0xD,
	GVE_ADMINQ_GET_PTYPE_MAP		= 0xE,
};
28 
29 /* Admin queue status codes */
/* Admin queue status codes
 *
 * Completion status of an admin queue command.  UNSET (0x0) means the
 * command has not completed yet; PASSED (0x1) means success.  Error
 * codes occupy the top of the 32-bit range (0xFFFFFFF0..0xFFFFFFFF) and
 * are named after the canonical RPC error categories.
 */
enum gve_adminq_statuses {
	GVE_ADMINQ_COMMAND_UNSET			= 0x0,
	GVE_ADMINQ_COMMAND_PASSED			= 0x1,
	GVE_ADMINQ_COMMAND_ERROR_ABORTED		= 0xFFFFFFF0,
	GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS		= 0xFFFFFFF1,
	GVE_ADMINQ_COMMAND_ERROR_CANCELLED		= 0xFFFFFFF2,
	GVE_ADMINQ_COMMAND_ERROR_DATALOSS		= 0xFFFFFFF3,
	GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED	= 0xFFFFFFF4,
	GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION	= 0xFFFFFFF5,
	GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR		= 0xFFFFFFF6,
	GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT	= 0xFFFFFFF7,
	GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND		= 0xFFFFFFF8,
	GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE		= 0xFFFFFFF9,
	GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED	= 0xFFFFFFFA,
	GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED	= 0xFFFFFFFB,
	GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED	= 0xFFFFFFFC,
	GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE		= 0xFFFFFFFD,
	GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED		= 0xFFFFFFFE,
	GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR		= 0xFFFFFFFF,
};
50 
/* Version of the device descriptor format this driver understands */
#define GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION 1

/* All AdminQ command structs should be naturally packed. The static_assert
 * calls make sure this is the case at compile time.
 */

/* DESCRIBE_DEVICE command: asks the device to write its device
 * descriptor (struct gve_device_descriptor) into a driver-provided
 * buffer.
 */
struct gve_adminq_describe_device {
	__be64 device_descriptor_addr;		/* bus address of the buffer */
	__be32 device_descriptor_version;	/* format version driver expects */
	__be32 available_length;		/* buffer length in bytes */
};

static_assert(sizeof(struct gve_adminq_describe_device) == 16);
64 
/* Device descriptor, written by the device in response to a
 * DESCRIBE_DEVICE command.  num_device_options/total_length suggest the
 * descriptor is followed in memory by a list of struct gve_device_option
 * entries -- the trailing layout is not shown in this header; verify
 * against the parsing code in gve_adminq.c.
 *
 * NOTE(review): per-field comments below are inferred from the field
 * names -- confirm against the device spec.
 */
struct gve_device_descriptor {
	__be64 max_registered_pages;	/* max pages driver may register */
	__be16 reserved1;
	__be16 tx_queue_entries;	/* default TX ring size */
	__be16 rx_queue_entries;	/* default RX ring size */
	__be16 default_num_queues;
	__be16 mtu;
	__be16 counters;		/* number of event counters */
	__be16 tx_pages_per_qpl;	/* pages per TX queue page list */
	__be16 rx_pages_per_qpl;	/* pages per RX queue page list */
	u8  mac[ETH_ALEN];		/* device MAC address */
	__be16 num_device_options;	/* count of trailing device options */
	__be16 total_length;		/* descriptor + options, in bytes */
	u8  reserved2[6];
};

static_assert(sizeof(struct gve_device_descriptor) == 40);
82 
/* Header of one entry in the device descriptor's option list.
 * option_length bytes of option-specific payload presumably follow
 * this header -- confirm against the option parsing code.
 */
struct gve_device_option {
	__be16 option_id;		/* enum gve_dev_opt_id */
	__be16 option_length;		/* payload length in bytes */
	__be32 required_features_mask;	/* see enum gve_dev_opt_req_feat_mask */
};

static_assert(sizeof(struct gve_device_option) == 8);
90 
/* Payload of the GQI_RDA device option (GQI queue format with raw DMA
 * addressing).
 */
struct gve_device_option_gqi_rda {
	__be32 supported_features_mask;
};

static_assert(sizeof(struct gve_device_option_gqi_rda) == 4);
96 
/* Payload of the GQI_QPL device option (GQI queue format with queue
 * page lists).
 */
struct gve_device_option_gqi_qpl {
	__be32 supported_features_mask;
};

static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4);
102 
/* Payload of the DQO_RDA device option (DQO queue format with raw DMA
 * addressing).  Carries the DQO-specific completion/buffer ring sizes.
 */
struct gve_device_option_dqo_rda {
	__be32 supported_features_mask;
	__be16 tx_comp_ring_entries;	/* TX completion ring size */
	__be16 rx_buff_ring_entries;	/* RX buffer ring size */
};

static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);
110 
/* Payload of the JUMBO_FRAMES device option: advertises the largest MTU
 * the device supports.
 */
struct gve_device_option_jumbo_frames {
	__be32 supported_features_mask;
	__be16 max_mtu;		/* maximum MTU supported by the device */
	u8 padding[2];
};

static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8);
118 
119 /* Terminology:
120  *
121  * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
122  *       mapped and read/updated by the device.
123  *
124  * QPL - Queue Page Lists - Driver uses bounce buffers which are DMA mapped with
125  *       the device for read/write and data is copied from/to SKBs.
126  */
/* Device option IDs, carried in struct gve_device_option.option_id.
 * Note 0x5-0x7 are not defined in this version of the interface.
 */
enum gve_dev_opt_id {
	GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
	GVE_DEV_OPT_ID_GQI_RDA = 0x2,
	GVE_DEV_OPT_ID_GQI_QPL = 0x3,
	GVE_DEV_OPT_ID_DQO_RDA = 0x4,
	GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
};
134 
/* Expected required_features_mask value for each device option.
 * Currently no option requires any driver features (all masks are 0).
 */
enum gve_dev_opt_req_feat_mask {
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
};
142 
/* Bits used in a device option's supported_features_mask */
enum gve_sup_feature_mask {
	GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
};

/* The GQI raw addressing option carries no payload */
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
148 
/* CONFIGURE_DEVICE_RESOURCES command: hands the device the addresses of
 * the shared event counter array and the interrupt doorbell array.
 */
struct gve_adminq_configure_device_resources {
	__be64 counter_array;		/* bus address of event counter array */
	__be64 irq_db_addr;		/* bus address of IRQ doorbell array */
	__be32 num_counters;		/* entries in counter array */
	__be32 num_irq_dbs;		/* entries in doorbell array */
	__be32 irq_db_stride;		/* bytes between consecutive doorbells */
	__be32 ntfy_blk_msix_base_idx;	/* MSI-X base index of notify blocks */
	u8 queue_format;		/* GQI vs DQO -- see gve_priv */
	u8 padding[7];
};

static_assert(sizeof(struct gve_adminq_configure_device_resources) == 40);
161 
/* REGISTER_PAGE_LIST command: registers a queue page list (QPL) with
 * the device.  page_address_list_addr points to an array of num_pages
 * page addresses -- presumably __be64 bus addresses; verify in
 * gve_adminq.c.
 */
struct gve_adminq_register_page_list {
	__be32 page_list_id;		/* driver-chosen QPL identifier */
	__be32 num_pages;
	__be64 page_address_list_addr;
};

static_assert(sizeof(struct gve_adminq_register_page_list) == 16);
169 
/* UNREGISTER_PAGE_LIST command: releases a previously registered QPL */
struct gve_adminq_unregister_page_list {
	__be32 page_list_id;
};

static_assert(sizeof(struct gve_adminq_unregister_page_list) == 4);

/* Sentinel queue_page_list_id meaning "no QPL: raw DMA addressing" */
#define GVE_RAW_ADDRESSING_QPL_ID 0xFFFFFFFF
177 
/* CREATE_TX_QUEUE command.  queue_page_list_id is
 * GVE_RAW_ADDRESSING_QPL_ID when raw DMA addressing is in use.
 * tx_comp_ring_addr/tx_comp_ring_size look DQO-specific and are
 * presumably ignored in GQI mode -- confirm against gve_adminq.c.
 */
struct gve_adminq_create_tx_queue {
	__be32 queue_id;
	__be32 reserved;
	__be64 queue_resources_addr;	/* bus address of gve_queue_resources */
	__be64 tx_ring_addr;		/* bus address of descriptor ring */
	__be32 queue_page_list_id;	/* QPL id or GVE_RAW_ADDRESSING_QPL_ID */
	__be32 ntfy_id;			/* notify block for this queue's IRQ */
	__be64 tx_comp_ring_addr;	/* DQO completion ring */
	__be16 tx_ring_size;		/* entries in descriptor ring */
	__be16 tx_comp_ring_size;	/* entries in completion ring */
	u8 padding[4];
};

static_assert(sizeof(struct gve_adminq_create_tx_queue) == 48);
192 
/* CREATE_RX_QUEUE command.  queue_page_list_id is
 * GVE_RAW_ADDRESSING_QPL_ID when raw DMA addressing is in use.
 * rx_buff_ring_size and enable_rsc look DQO-specific -- confirm
 * against gve_adminq.c.
 */
struct gve_adminq_create_rx_queue {
	__be32 queue_id;
	__be32 index;
	__be32 reserved;
	__be32 ntfy_id;			/* notify block for this queue's IRQ */
	__be64 queue_resources_addr;	/* bus address of gve_queue_resources */
	__be64 rx_desc_ring_addr;	/* bus address of descriptor ring */
	__be64 rx_data_ring_addr;	/* bus address of data/buffer ring */
	__be32 queue_page_list_id;	/* QPL id or GVE_RAW_ADDRESSING_QPL_ID */
	__be16 rx_ring_size;		/* entries in descriptor ring */
	__be16 packet_buffer_size;
	__be16 rx_buff_ring_size;	/* DQO buffer ring entries */
	u8 enable_rsc;			/* nonzero enables RSC, presumably */
	u8 padding[5];
};

static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56);
210 
211 /* Queue resources that are shared with the device */
212 struct gve_queue_resources {
213 	union {
214 		struct {
215 			__be32 db_index;	/* Device -> Guest */
216 			__be32 counter_index;	/* Device -> Guest */
217 		};
218 		u8 reserved[64];
219 	};
220 };
221 
222 static_assert(sizeof(struct gve_queue_resources) == 64);
223 
/* DESTROY_TX_QUEUE command */
struct gve_adminq_destroy_tx_queue {
	__be32 queue_id;
};

static_assert(sizeof(struct gve_adminq_destroy_tx_queue) == 4);
229 
/* DESTROY_RX_QUEUE command */
struct gve_adminq_destroy_rx_queue {
	__be32 queue_id;
};

static_assert(sizeof(struct gve_adminq_destroy_rx_queue) == 4);
235 
/* GVE Set Driver Parameter Types */
enum gve_set_driver_param_types {
	GVE_SET_PARAM_MTU	= 0x1,
};

/* SET_DRIVER_PARAMETER command: informs the device of a driver setting
 * (currently only the MTU).
 */
struct gve_adminq_set_driver_parameter {
	__be32 parameter_type;		/* enum gve_set_driver_param_types */
	u8 reserved[4];
	__be64 parameter_value;
};

static_assert(sizeof(struct gve_adminq_set_driver_parameter) == 16);
248 
/* REPORT_STATS command: asks the device to write stats (struct
 * gve_stats_report) into a driver-provided buffer.  interval is
 * presumably the reporting period -- units not shown here; verify
 * against the caller.
 */
struct gve_adminq_report_stats {
	__be64 stats_report_len;	/* buffer length in bytes */
	__be64 stats_report_addr;	/* bus address of the buffer */
	__be64 interval;
};

static_assert(sizeof(struct gve_adminq_report_stats) == 24);
256 
/* REPORT_LINK_SPEED command: asks the device to write the link speed to
 * the given address.
 */
struct gve_adminq_report_link_speed {
	__be64 link_speed_address;	/* bus address to write the speed to */
};

static_assert(sizeof(struct gve_adminq_report_link_speed) == 8);
262 
/* One statistic entry in a stats report */
struct stats {
	__be32 stat_name;	/* enum gve_stat_names */
	__be32 queue_id;	/* queue the stat applies to */
	__be64 value;
};

static_assert(sizeof(struct stats) == 16);

/* Layout of the buffer handed to the device via REPORT_STATS: a count
 * followed by a flexible array of stat entries.
 */
struct gve_stats_report {
	__be64 written_count;	/* number of valid entries in stats[] */
	struct stats stats[];
};

static_assert(sizeof(struct gve_stats_report) == 8);
277 
/* Identifiers for individual statistics in a stats report.  Values
 * below 65 are reported by the driver, 65 and up by the NIC.
 */
enum gve_stat_names {
	// stats from gve
	TX_WAKE_CNT			= 1,
	TX_STOP_CNT			= 2,
	TX_FRAMES_SENT			= 3,
	TX_BYTES_SENT			= 4,
	TX_LAST_COMPLETION_PROCESSED	= 5,
	RX_NEXT_EXPECTED_SEQUENCE	= 6,
	RX_BUFFERS_POSTED		= 7,
	TX_TIMEOUT_CNT			= 8,
	// stats from NIC
	RX_QUEUE_DROP_CNT		= 65,
	RX_NO_BUFFERS_POSTED		= 66,
	RX_DROPS_PACKET_OVER_MRU	= 67,
	RX_DROPS_INVALID_CHECKSUM	= 68,
};
294 
/* L3 protocol classification for a packet type (PTYPE) LUT entry */
enum gve_l3_type {
	/* Must be zero so zero initialized LUT is unknown. */
	GVE_L3_TYPE_UNKNOWN = 0,
	GVE_L3_TYPE_OTHER,
	GVE_L3_TYPE_IPV4,
	GVE_L3_TYPE_IPV6,
};

/* L4 protocol classification for a packet type (PTYPE) LUT entry */
enum gve_l4_type {
	/* Must be zero so zero initialized LUT is unknown. */
	GVE_L4_TYPE_UNKNOWN = 0,
	GVE_L4_TYPE_OTHER,
	GVE_L4_TYPE_TCP,
	GVE_L4_TYPE_UDP,
	GVE_L4_TYPE_ICMP,
	GVE_L4_TYPE_SCTP,
};
312 
/* These are control path types for PTYPE which are the same as the data path
 * types.
 */
struct gve_ptype_entry {
	u8 l3_type;	/* enum gve_l3_type */
	u8 l4_type;	/* enum gve_l4_type */
};

/* Full PTYPE lookup table as written by the device */
struct gve_ptype_map {
	struct gve_ptype_entry ptypes[1 << 10]; /* PTYPES are always 10 bits. */
};
324 
325 struct gve_adminq_get_ptype_map {
326 	__be64 ptype_map_len;
327 	__be64 ptype_map_addr;
328 };
329 
/* One admin queue command slot.  Every command is exactly 64 bytes:
 * a common opcode/status header followed by the opcode-specific body,
 * with reserved[] padding the union out to the fixed slot size.
 */
union gve_adminq_command {
	struct {
		__be32 opcode;	/* enum gve_adminq_opcodes */
		__be32 status;	/* enum gve_adminq_statuses, set on completion */
		union {
			struct gve_adminq_configure_device_resources
						configure_device_resources;
			struct gve_adminq_create_tx_queue create_tx_queue;
			struct gve_adminq_create_rx_queue create_rx_queue;
			struct gve_adminq_destroy_tx_queue destroy_tx_queue;
			struct gve_adminq_destroy_rx_queue destroy_rx_queue;
			struct gve_adminq_describe_device describe_device;
			struct gve_adminq_register_page_list reg_page_list;
			struct gve_adminq_unregister_page_list unreg_page_list;
			struct gve_adminq_set_driver_parameter set_driver_param;
			struct gve_adminq_report_stats report_stats;
			struct gve_adminq_report_link_speed report_link_speed;
			struct gve_adminq_get_ptype_map get_ptype_map;
		};
	};
	u8 reserved[64];
};

static_assert(sizeof(union gve_adminq_command) == 64);
354 
/* Admin queue lifecycle (implemented in gve_adminq.c).  Int-returning
 * functions presumably follow kernel convention of 0 on success /
 * negative errno on failure -- confirm in gve_adminq.c.
 */
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv);
void gve_adminq_free(struct device *dev, struct gve_priv *priv);
void gve_adminq_release(struct gve_priv *priv);
/* Issue DESCRIBE_DEVICE and parse the returned descriptor/options */
int gve_adminq_describe_device(struct gve_priv *priv);
/* Issue CONFIGURE_DEVICE_RESOURCES with the given counter/doorbell arrays */
int gve_adminq_configure_device_resources(struct gve_priv *priv,
					  dma_addr_t counter_array_bus_addr,
					  u32 num_counters,
					  dma_addr_t db_array_bus_addr,
					  u32 num_ntfy_blks);
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
/* Queue create/destroy; the u32 argument is a queue count or id per the
 * parameter name.
 */
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues);
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 queue_id);
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
/* Register/unregister queue page lists with the device */
int gve_adminq_register_page_list(struct gve_priv *priv,
				  struct gve_queue_page_list *qpl);
int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);
/* SET_DRIVER_PARAMETER with GVE_SET_PARAM_MTU */
int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
/* REPORT_STATS / REPORT_LINK_SPEED wrappers */
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
			    dma_addr_t stats_report_addr, u64 interval);
int gve_adminq_report_link_speed(struct gve_priv *priv);

struct gve_ptype_lut;
/* Fetch the PTYPE map (DQO only) and fill the driver's lookup table */
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
				 struct gve_ptype_lut *ptype_lut);
380 
381 #endif /* _GVE_ADMINQ_H */
382