/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include <net/xdp.h>

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION	0
#define MANA_MINOR_VERSION	1
#define MANA_MICRO_VERSION	1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* The number of entries in the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_SIZE 64
#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
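
/* Illustrative sketch (not code from this file): because the table size is a
 * power of 2, an RSS hash can be mapped to an indirection table slot with a
 * mask instead of a modulo:
 *
 *	u32 slot = pkt_hash & MANA_INDIRECT_TABLE_MASK;
 *	u32 rxq_idx = apc->indir_table[slot];
 */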

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40

#define COMP_ENTRY_SIZE 64

#define RX_BUFFERS_PER_QUEUE 512

#define MAX_SEND_BUFFERS_PER_QUEUE 256

#define EQ_SIZE (8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256

/* Update this count whenever the respective structures are changed */
#define MANA_STATS_RX_COUNT 5
#define MANA_STATS_TX_COUNT 11

struct mana_stats_rx {
	u64 packets;
	u64 bytes;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 xdp_redirect;
	struct u64_stats_sync syncp;
};

struct mana_stats_tx {
	u64 packets;
	u64 bytes;
	u64 xdp_xmit;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 short_pkt_fmt;
	u64 long_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	struct u64_stats_sync syncp;
};
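
/* Example (a sketch of the generic u64_stats pattern, not code from this
 * driver): writers bracket updates with the syncp sequence counter so that
 * readers on 32-bit machines see consistent 64-bit values:
 *
 *	u64_stats_update_begin(&rxq->stats.syncp);
 *	rxq->stats.packets++;
 *	rxq->stats.bytes += pkt_len;
 *	u64_stats_update_end(&rxq->stats.syncp);
 *
 * Readers loop with u64_stats_fetch_begin()/u64_stats_fetch_retry().
 */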

struct mana_txq {
	struct gdma_queue *gdma_sq;

	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1	: 10;
			u32 vsq_frame	: 14;
			u32 reserved2	: 8;
		};
	};

	u16 vp_offset;

	struct net_device *ndev;

	/* The SKBs are sent to the HW and we are waiting for the CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	bool napi_initialized;

	struct mana_stats_tx stats;
};

/* SKB data and frags DMA mappings */
struct mana_skb_head {
	/* GSO pkts may have 2 SGEs for the linear part */
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];

	u32 size[MAX_SKB_FRAGS + 2];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)
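
/* Hedged sketch (an assumption about the intended use of MANA_HEADROOM, not
 * code from this file): the DMA mappings of an SKB are kept in the SKB's own
 * headroom so the TX completion path can unmap without extra allocations:
 *
 *	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
 *
 *	ash->dma_handle[0] = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
 *	ash->size[0] = len;
 */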

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT	= 0,
	MANA_LONG_PKT_FMT	= 1,
};

struct mana_tx_short_oob {
	u32 pkt_fmt		: 2;
	u32 is_outer_ipv4	: 1;
	u32 is_outer_ipv6	: 1;
	u32 comp_iphdr_csum	: 1;
	u32 comp_tcp_csum	: 1;
	u32 comp_udp_csum	: 1;
	u32 supress_txcqe_gen	: 1;
	u32 vcq_num		: 24;

	u32 trans_off		: 10; /* Transport header offset */
	u32 vsq_frame		: 14;
	u32 short_vp_offset	: 8;
}; /* HW DATA */

struct mana_tx_long_oob {
	u32 is_encap		: 1;
	u32 inner_is_ipv6	: 1;
	u32 inner_tcp_opt	: 1;
	u32 inject_vlan_pri_tag : 1;
	u32 reserved1		: 12;
	u32 pcp			: 3;  /* 802.1Q */
	u32 dei			: 1;  /* 802.1Q */
	u32 vlan_id		: 12; /* 802.1Q */

	u32 inner_frame_offset	: 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset	: 12;
	u32 reserved2		: 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */
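
/* Minimal sketch (not the driver's actual TX path): filling the short OOB for
 * a plain IPv4/TCP packet with TX checksum offload:
 *
 *	struct mana_tx_oob tx_oob = {};
 *
 *	tx_oob.s_oob.pkt_fmt = MANA_SHORT_PKT_FMT;
 *	tx_oob.s_oob.is_outer_ipv4 = 1;
 *	tx_oob.s_oob.comp_tcp_csum = 1;
 *	tx_oob.s_oob.trans_off = skb_transport_offset(skb);
 */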

enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID			= 0,
	CQE_RX_OKAY			= 1,
	CQE_RX_COALESCED_4		= 2,
	CQE_RX_OBJECT_FENCE		= 3,
	CQE_RX_TRUNCATED		= 4,

	CQE_TX_OKAY			= 32,
	CQE_TX_SA_DROP			= 33,
	CQE_TX_MTU_DROP			= 34,
	CQE_TX_INVALID_OOB		= 35,
	CQE_TX_INVALID_ETH_TYPE		= 36,
	CQE_TX_HDR_PROCESSING_ERROR	= 37,
	CQE_TX_VF_DISABLED		= 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE	= 39,
	CQE_TX_VPORT_DISABLED		= 40,
	CQE_TX_VLAN_TAGGING_VIOLATION	= 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
	u32 cqe_type	: 6;
	u32 client_type	: 2;
	u32 vendor_err	: 24;
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4		BIT(0)
#define NDIS_HASH_TCP_IPV4	BIT(1)
#define NDIS_HASH_UDP_IPV4	BIT(2)
#define NDIS_HASH_IPV6		BIT(3)
#define NDIS_HASH_TCP_IPV6	BIT(4)
#define NDIS_HASH_UDP_IPV6	BIT(5)
#define NDIS_HASH_IPV6_EX	BIT(6)
#define NDIS_HASH_TCP_IPV6_EX	BIT(7)
#define NDIS_HASH_UDP_IPV6_EX	BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4                                                         \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
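
/* Illustrative sketch (not code from this file): the rx_hashtype reported in
 * the RX completion can be translated into the kernel's hash levels:
 *
 *	if (cqe->rx_hashtype & MANA_HASH_L4)
 *		skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
 *	else if (cqe->rx_hashtype & MANA_HASH_L3)
 *		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
 */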

struct mana_rxcomp_perpkt_info {
	u32 pkt_len	: 16;
	u32 reserved1	: 16;
	u32 reserved2;
	u32 pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id			: 12;
	u32 rx_vlantag_present		: 1;
	u32 rx_outer_iphdr_csum_succeed	: 1;
	u32 rx_outer_iphdr_csum_fail	: 1;
	u32 reserved1			: 1;
	u32 rx_hashtype			: 9;
	u32 rx_iphdr_csum_succeed	: 1;
	u32 rx_iphdr_csum_fail		: 1;
	u32 rx_tcp_csum_succeed		: 1;
	u32 rx_tcp_csum_fail		: 1;
	u32 rx_udp_csum_succeed		: 1;
	u32 rx_udp_csum_fail		: 1;
	u32 reserved2			: 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;
}; /* HW DATA */
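
/* Hedged sketch of how the checksum bits may map to SKB state (the real RX
 * path can apply more conditions):
 *
 *	if (oob->rx_tcp_csum_succeed || oob->rx_udp_csum_succeed)
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	else
 *		skb->ip_summed = CHECKSUM_NONE;
 */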

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset	: 5;
	u32 tx_wqe_offset	: 27;

	u32 reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Valid (and must be non-NULL) only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Valid (and must be non-NULL) only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer which the CQ handler can copy the CQEs into. */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	/* NAPI data */
	struct napi_struct napi;
	int work_done;
	int work_done_since_doorbell;
	int budget;
};
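
/* Illustrative sketch (not this driver's poll function; assumes the gdma.h
 * helpers mana_gd_ring_cq()/SET_ARM_BIT): work_done and budget feed the
 * standard NAPI completion contract, re-arming the CQ only when done:
 *
 *	if (cq->work_done < cq->budget &&
 *	    napi_complete_done(&cq->napi, cq->work_done))
 *		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
 */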

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	void *buf_va;
	bool from_pool; /* allocated from a page pool */

	/* SGL of the buffer going to be sent as part of the work request. */
	u32 num_sge;
	struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
};

#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
			+ ETH_HLEN)

#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
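
/* Hedged sketch (not the driver's .ndo_bpf handler): an XDP program can only
 * be attached while the MTU keeps the whole frame in one page:
 *
 *	if (prog && ndev->mtu > MANA_XDP_MTU_MAX)
 *		return -EOPNOTSUPP;
 */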

struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;
	u32 alloc_size;
	u32 headroom;

	mana_handle_t rxobj;

	struct mana_cq rx_cq;

	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;

	struct mana_stats_rx stats;

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	void *xdp_save_va; /* for reusing */
	bool xdp_flush;
	int xdp_rc; /* XDP redirect return code */

	struct page_pool *page_pool;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[];
};
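
/* Illustrative sketch (assumed allocation pattern for the trailing flexible
 * array, not code from this file):
 *
 *	rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
 *		      GFP_KERNEL);
 */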

struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;
};

struct mana_ethtool_stats {
	u64 stop_queue;
	u64 wake_queue;
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	u64 tx_cqe_err;
	u64 tx_cqe_unknown_type;
	u64 rx_coalesced_err;
	u64 rx_cqe_unknown_type;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;

	struct mana_eq *eqs;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;

	/* Indirection Table for RX & TX. The values are queue indexes */
	u32 indir_table[MANA_INDIRECT_TABLE_SIZE];

	/* Indirection table containing RxObject Handles */
	mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues of RQ pointers. */
	struct mana_rxq **rxqs;

	/* pre-allocated rx buffer array */
	void **rxbufs_pre;
	dma_addr_t *das_pre;
	int rxbpre_total;
	u32 rxbpre_datasize;
	u32 rxbpre_alloc_size;
	u32 rxbpre_headroom;

	struct bpf_prog *bpf_prog;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int max_queues;
	unsigned int num_queues;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	/* Mutex for sharing access to vport_use_count */
	struct mutex vport_mutex;
	int vport_use_count;

	u16 port_idx;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;
};

netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
void mana_query_gf_stats(struct mana_port_context *apc);

extern const struct ethtool_ops mana_ethtool_ops;

/* A CQ can be created without being associated with any EQ */
#define GDMA_CQ_NO_EQ  0xffff

struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;
	u32 modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG	= 0x20001,
	MANA_QUERY_GF_STAT	= 0x20002,
	MANA_CONFIG_VPORT_TX	= 0x20003,
	MANA_CREATE_WQ_OBJ	= 0x20004,
	MANA_DESTROY_WQ_OBJ	= 0x20005,
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER	= 0x28000,
	MANA_DEREGISTER_FILTER	= 0x28001,
	MANA_REGISTER_HW_PORT	= 0x28003,
	MANA_DEREGISTER_HW_PORT	= 0x28004,
};

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA NIC driver capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u16 reserved;
	u32 max_num_eqs;

	/* response v2: */
	u16 adapter_mtu;
	u16 reserved2;
	u32 reserved3;
}; /* HW DATA */
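
/* Hedged sketch of the request/response pattern (send_request_over_hwc() is a
 * hypothetical stand-in for the driver's internal HWC send helper):
 *
 *	struct mana_query_device_cfg_resp resp = {};
 *	struct mana_query_device_cfg_req req = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
 *			     sizeof(req), sizeof(resp));
 *	req.proto_major_ver = MANA_MAJOR_VERSION;
 *	err = send_request_over_hwc(ac, &req, sizeof(req), &resp, sizeof(resp));
 */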

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;
	u8 short_form_allowed;
	u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Query GF stats */
struct mana_query_gf_stat_req {
	struct gdma_req_hdr hdr;
	u64 req_stats;
}; /* HW DATA */

struct mana_query_gf_stat_resp {
	struct gdma_resp_hdr hdr;
	u64 reported_stats;
	/* rx errors/discards */
	u64 discard_rx_nowqe;
	u64 err_rx_vport_disabled;
	/* rx bytes/packets */
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	/* tx errors */
	u64 err_tx_gf_disabled;
	u64 err_tx_vport_disabled;
	u64 err_tx_inval_vport_offset_pkt;
	u64 err_tx_vlan_enforcement;
	u64 err_tx_ethtype_enforcement;
	u64 err_tx_SA_enforecement;
	u64 err_tx_SQPDID_enforcement;
	u64 err_tx_CQPDID_enforcement;
	u64 err_tx_mtu_violation;
	u64 err_tx_inval_oob;
	/* tx bytes/packets */
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	/* tx error */
	u64 err_tx_gdma;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
	u8 cqe_coalescing_enable;
	u8 reserved2[7];
}; /* HW DATA */

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register HW vPort */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u16 attached_gfid;
	u8 is_pf_default_vport;
	u8 reserved1;
	u8 allow_all_ether_types;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
}; /* HW DATA */

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register filter */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u8 mac_addr[6];
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
	u16 reserved5;
	u32 reserved6;
	u32 reserved7;
	u32 reserved8;
}; /* HW DATA */

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

/* Deregister filter */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Requested GF stats Flags */
/* Rx discards/Errors */
#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE		0x0000000000000001
#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED	0x0000000000000002
/* Rx bytes/pkts */
#define STATISTICS_FLAGS_HC_RX_BYTES			0x0000000000000004
#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS		0x0000000000000008
#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES		0x0000000000000010
#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS		0x0000000000000020
#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES		0x0000000000000040
#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS		0x0000000000000080
#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES		0x0000000000000100
/* Tx errors */
#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED		0x0000000000000200
#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED	0x0000000000000400
#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS		\
							0x0000000000000800
#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT	0x0000000000001000
#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT			\
							0x0000000000002000
#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT	0x0000000000004000
#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT	0x0000000000008000
#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT	0x0000000000010000
#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION	0x0000000000020000
#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB		0x0000000000040000
/* Tx bytes/pkts */
#define STATISTICS_FLAGS_HC_TX_BYTES			0x0000000000080000
#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS		0x0000000000100000
#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES		0x0000000000200000
#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS		0x0000000000400000
#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES		0x0000000000800000
#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS		0x0000000001000000
#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES		0x0000000002000000
/* Tx error */
#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR		0x0000000004000000
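
/* Illustrative sketch (not code from this file): a subset of GF stats is
 * requested by OR-ing the flag bits into mana_query_gf_stat_req.req_stats:
 *
 *	req.req_stats = STATISTICS_FLAGS_HC_RX_BYTES |
 *			STATISTICS_FLAGS_HC_TX_BYTES;
 */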

#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
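
/* Hedged sketch of the assumed format selection (the real TX path may check
 * more conditions): the short OOB has only 8 bits for the vPort offset, so
 * larger offsets require the long packet format:
 *
 *	if (apc->tx_shortform_allowed &&
 *	    apc->tx_vp_offset <= MANA_SHORT_VPORT_OFFSET_MAX)
 *		pkt_fmt = MANA_SHORT_PKT_FMT;
 *	else
 *		pkt_fmt = MANA_LONG_PKT_FMT;
 */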

struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};

int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);
#endif /* _MANA_H */