/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_LAN_TX_RX_H_
#define _ICE_LAN_TX_RX_H_

union ice_32byte_rx_desc {
	struct {
		__le64  pkt_addr; /* Packet buffer address */
		__le64  hdr_addr; /* Header buffer address */
			/* bit 0 of hdr_addr is DD bit */
		__le64  rsvd1;
		__le64  rsvd2;
	} read;
	struct {
		struct {
			struct {
				__le16 mirroring_status;
				__le16 l2tag1;
			} lo_dword;
			union {
				__le32 rss; /* RSS Hash */
				__le32 fd_id; /* Flow Director filter id */
			} hi_dword;
		} qword0;
		struct {
			/* status/error/PTYPE/length */
			__le64 status_error_len;
		} qword1;
		struct {
			__le16 ext_status; /* extended status */
			__le16 rsvd;
			__le16 l2tag2_1;
			__le16 l2tag2_2;
		} qword2;
		struct {
			__le32 reserved;
			__le32 fd_id;
		} qword3;
	} wb; /* writeback */
};

struct ice_rx_ptype_decoded {
	u32 ptype:10;
	u32 known:1;
	u32 outer_ip:1;
	u32 outer_ip_ver:2;
	u32 outer_frag:1;
	u32 tunnel_type:3;
	u32 tunnel_end_prot:2;
	u32 tunnel_end_frag:1;
	u32 inner_prot:4;
	u32 payload_layer:3;
};

enum ice_rx_ptype_outer_ip {
	ICE_RX_PTYPE_OUTER_L2	= 0,
	ICE_RX_PTYPE_OUTER_IP	= 1,
};

enum ice_rx_ptype_outer_ip_ver {
	ICE_RX_PTYPE_OUTER_NONE	= 0,
	ICE_RX_PTYPE_OUTER_IPV4	= 1,
	ICE_RX_PTYPE_OUTER_IPV6	= 2,
};

enum ice_rx_ptype_outer_fragmented {
	ICE_RX_PTYPE_NOT_FRAG	= 0,
	ICE_RX_PTYPE_FRAG	= 1,
};

enum ice_rx_ptype_tunnel_type {
	ICE_RX_PTYPE_TUNNEL_NONE		= 0,
	ICE_RX_PTYPE_TUNNEL_IP_IP		= 1,
	ICE_RX_PTYPE_TUNNEL_IP_GRENAT		= 2,
	ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC	= 3,
	ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN	= 4,
};

enum ice_rx_ptype_tunnel_end_prot {
	ICE_RX_PTYPE_TUNNEL_END_NONE	= 0,
	ICE_RX_PTYPE_TUNNEL_END_IPV4	= 1,
	ICE_RX_PTYPE_TUNNEL_END_IPV6	= 2,
};

enum ice_rx_ptype_inner_prot {
	ICE_RX_PTYPE_INNER_PROT_NONE		= 0,
	ICE_RX_PTYPE_INNER_PROT_UDP		= 1,
	ICE_RX_PTYPE_INNER_PROT_TCP		= 2,
	ICE_RX_PTYPE_INNER_PROT_SCTP		= 3,
	ICE_RX_PTYPE_INNER_PROT_ICMP		= 4,
	ICE_RX_PTYPE_INNER_PROT_TIMESYNC	= 5,
};

enum ice_rx_ptype_payload_layer {
	ICE_RX_PTYPE_PAYLOAD_LAYER_NONE	= 0,
	ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2	= 1,
	ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3	= 2,
	ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4	= 3,
};

/* RX Flex Descriptor
 * This descriptor is used instead of the legacy version descriptor when
 * ice_rlan_ctx.adv_desc is set
 */
union ice_32b_rx_flex_desc {
	struct {
		__le64  pkt_addr; /* Packet buffer address */
		__le64  hdr_addr; /* Header buffer address */
				  /* bit 0 of hdr_addr is DD bit */
		__le64  rsvd1;
		__le64  rsvd2;
	} read;
	struct {
		/* Qword 0 */
		u8 rxdid; /* descriptor builder profile id */
		u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
		__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
		__le16 pkt_len; /* [15:14] are reserved */
		__le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
						/* sph=[11:11] */
						/* ff1/ext=[15:12] */

		/* Qword 1 */
		__le16 status_error0;
		__le16 l2tag1;
		__le16 flex_meta0;
		__le16 flex_meta1;

		/* Qword 2 */
		__le16 status_error1;
		u8 flex_flags2;
		u8 time_stamp_low;
		__le16 l2tag2_1st;
		__le16 l2tag2_2nd;

		/* Qword 3 */
		__le16 flex_meta2;
		__le16 flex_meta3;
		union {
			struct {
				__le16 flex_meta4;
				__le16 flex_meta5;
			} flex;
			__le32 ts_high;
		} flex_ts;
	} wb; /* writeback */
};

/* Rx Flex Descriptor NIC Profile
 * This descriptor corresponds to RxDID 2 which contains
 * metadata fields for RSS, flow ID and timestamp info
 */
struct ice_32b_rx_flex_desc_nic {
	/* Qword 0 */
	u8 rxdid;
	u8 mir_id_umb_cast;
	__le16 ptype_flexi_flags0;
	__le16 pkt_len;
	__le16 hdr_len_sph_flex_flags1;

	/* Qword 1 */
	__le16 status_error0;
	__le16 l2tag1;
	__le32 rss_hash;

	/* Qword 2 */
	__le16 status_error1;
	u8 flexi_flags2;
	u8 ts_low;
	__le16 l2tag2_1st;
	__le16 l2tag2_2nd;

	/* Qword 3 */
	__le32 flow_id;
	union {
		struct {
			__le16 vlan_id;
			__le16 flow_id_ipv6;
		} flex;
		__le32 ts_high;
	} flex_ts;
};
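
/* Illustrative sketch, not part of the hardware definition: with the NIC
 * profile above, the RSS hash and flow ID sit at fixed offsets in qwords 1
 * and 3 of the writeback, so a driver can read them directly once the
 * descriptor is done.  The ice_example_* helper names are hypothetical.
 */
static inline u32
ice_example_nic_rss_hash(const struct ice_32b_rx_flex_desc_nic *desc)
{
	return le32_to_cpu(desc->rss_hash);	/* Qword 1 */
}

static inline u32
ice_example_nic_flow_id(const struct ice_32b_rx_flex_desc_nic *desc)
{
	return le32_to_cpu(desc->flow_id);	/* Qword 3 */
}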

/* Receive Flex Descriptor profile IDs: There are a total of 64 profiles
 * where profile IDs 0 and 1 are for legacy, and profiles 2-63 are flex
 * profiles that can be programmed with specific metadata (profile 7 is
 * reserved for HW).
 */
enum ice_rxdid {
	ICE_RXDID_START			= 0,
	ICE_RXDID_LEGACY_0		= ICE_RXDID_START,
	ICE_RXDID_LEGACY_1,
	ICE_RXDID_FLX_START,
	ICE_RXDID_FLEX_NIC		= ICE_RXDID_FLX_START,
	ICE_RXDID_FLX_LAST		= 63,
	ICE_RXDID_LAST			= ICE_RXDID_FLX_LAST
};

/* Receive Flex Descriptor Rx opcode values */
#define ICE_RX_OPC_MDID		0x01

/* Receive Descriptor MDID values */
#define ICE_RX_MDID_FLOW_ID_LOWER	5
#define ICE_RX_MDID_FLOW_ID_HIGH	6
#define ICE_RX_MDID_HASH_LOW		56
#define ICE_RX_MDID_HASH_HIGH		57

/* Rx Flag64 packet flag bits */
enum ice_rx_flg64_bits {
	ICE_RXFLG_PKT_DSI	= 0,
	ICE_RXFLG_EVLAN_x8100	= 15,
	ICE_RXFLG_EVLAN_x9100,
	ICE_RXFLG_VLAN_x8100,
	ICE_RXFLG_TNL_MAC	= 22,
	ICE_RXFLG_TNL_VLAN,
	ICE_RXFLG_PKT_FRG,
	ICE_RXFLG_FIN		= 32,
	ICE_RXFLG_SYN,
	ICE_RXFLG_RST,
	ICE_RXFLG_TNL0		= 38,
	ICE_RXFLG_TNL1,
	ICE_RXFLG_TNL2,
	ICE_RXFLG_UDP_GRE,
	ICE_RXFLG_RSVD		= 63
};

/* for ice_32b_rx_flex_desc.ptype_flex_flags0 member */
#define ICE_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10 bits */

/* for ice_32b_rx_flex_desc.pkt_len member */
#define ICE_RX_FLX_DESC_PKT_LEN_M	(0x3FFF) /* 14 bits */

enum ice_rx_flex_desc_status_error_0_bits {
	/* Note: These are predefined bit offsets */
	ICE_RX_FLEX_DESC_STATUS0_DD_S = 0,
	ICE_RX_FLEX_DESC_STATUS0_EOF_S,
	ICE_RX_FLEX_DESC_STATUS0_HBO_S,
	ICE_RX_FLEX_DESC_STATUS0_L3L4P_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
	ICE_RX_FLEX_DESC_STATUS0_LPBK_S,
	ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
	ICE_RX_FLEX_DESC_STATUS0_RXE_S,
	ICE_RX_FLEX_DESC_STATUS0_CRCP_S,
	ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
	ICE_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};
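
/* Illustrative sketch, not part of the hardware definition: one way a
 * driver could test the DD (descriptor done) bit and extract the ptype and
 * packet length from a flex writeback descriptor using the masks and bit
 * offsets above.  The ice_example_* helper names are hypothetical; BIT()
 * and le16_to_cpu() are the usual kernel helpers.
 */
static inline bool
ice_example_flex_desc_done(const union ice_32b_rx_flex_desc *rx_desc)
{
	/* DD is bit 0 of status_error0 in the writeback format */
	return !!(le16_to_cpu(rx_desc->wb.status_error0) &
		  BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S));
}

static inline u16
ice_example_flex_desc_ptype(const union ice_32b_rx_flex_desc *rx_desc)
{
	return le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
	       ICE_RX_FLEX_DESC_PTYPE_M;
}

static inline u16
ice_example_flex_desc_pkt_len(const union ice_32b_rx_flex_desc *rx_desc)
{
	return le16_to_cpu(rx_desc->wb.pkt_len) & ICE_RX_FLX_DESC_PKT_LEN_M;
}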

#define ICE_RXQ_CTX_SIZE_DWORDS		8
#define ICE_RXQ_CTX_SZ			(ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))

/* RLAN Rx queue context data
 *
 * The sizes of the variables may be larger than needed due to crossing byte
 * boundaries. If a field's width is not set correctly, bits can be shifted
 * off the top of the variable when the field sits at the top of a byte and
 * crosses over into the next byte.
 */
struct ice_rlan_ctx {
	u16 head;
	u16 cpuid; /* bigger than needed, see above for reason */
	u64 base;
	u16 qlen;
#define ICE_RLAN_CTX_DBUF_S 7
	u16 dbuf; /* bigger than needed, see above for reason */
#define ICE_RLAN_CTX_HBUF_S 6
	u16 hbuf; /* bigger than needed, see above for reason */
	u8  dtype;
	u8  dsize;
	u8  crcstrip;
	u8  l2tsel;
	u8  hsplit_0;
	u8  hsplit_1;
	u8  showiv;
	u32 rxmax; /* bigger than needed, see above for reason */
	u8  tphrdesc_ena;
	u8  tphwdesc_ena;
	u8  tphdata_ena;
	u8  tphhead_ena;
	u16 lrxqthresh; /* bigger than needed, see above for reason */
};

struct ice_ctx_ele {
	u16 offset;
	u16 size_of;
	u16 width;
	u16 lsb;
};

#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) {	\
	.offset = offsetof(struct _struct, _ele),	\
	.size_of = FIELD_SIZEOF(struct _struct, _ele),	\
	.width = _width,				\
	.lsb = _lsb,					\
}
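
/* Illustrative sketch: ICE_CTX_STORE is meant to build an array of
 * struct ice_ctx_ele describing where each context field lands in the
 * packed hardware image.  The excerpt below only shows the pattern; the
 * authoritative table (with the full set of widths and LSB positions from
 * the hardware spec) lives in the driver proper, and the array name here
 * is hypothetical.
 */
static const struct ice_ctx_ele ice_example_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,	13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,	8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,	57,	32),
	{ 0 }
};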

/* for hsplit_0 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_0 {
	ICE_RLAN_RX_HSPLIT_0_NO_SPLIT		= 0,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_L2		= 1,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_IP		= 2,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP	= 4,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP		= 8,
};

/* for hsplit_1 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_1 {
	ICE_RLAN_RX_HSPLIT_1_NO_SPLIT		= 0,
	ICE_RLAN_RX_HSPLIT_1_SPLIT_L2		= 1,
	ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS	= 2,
};

/* TX Descriptor */
struct ice_tx_desc {
	__le64 buf_addr; /* Address of descriptor's data buf */
	__le64 cmd_type_offset_bsz;
};

enum ice_tx_desc_dtype_value {
	ICE_TX_DESC_DTYPE_DATA		= 0x0,
	ICE_TX_DESC_DTYPE_CTX		= 0x1,
	/* DESC_DONE - HW has completed write-back of descriptor */
	ICE_TX_DESC_DTYPE_DESC_DONE	= 0xF,
};
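
/* Illustrative sketch: after write-back the dtype field of
 * cmd_type_offset_bsz reads back as DESC_DONE, so Tx cleanup can key off
 * it.  The inline 0xF mask assumes dtype occupies bits [3:0] of the
 * quadword; the helper name is hypothetical.
 */
static inline bool
ice_example_tx_desc_done(const struct ice_tx_desc *tx_desc)
{
	return (le64_to_cpu(tx_desc->cmd_type_offset_bsz) & 0xF) ==
	       ICE_TX_DESC_DTYPE_DESC_DONE;
}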

#define ICE_TXD_QW1_CMD_S	4
#define ICE_TXD_QW1_CMD_M	(0xFFFUL << ICE_TXD_QW1_CMD_S)

enum ice_tx_desc_cmd_bits {
	ICE_TX_DESC_CMD_EOP			= 0x0001,
	ICE_TX_DESC_CMD_RS			= 0x0002,
	ICE_TX_DESC_CMD_IL2TAG1			= 0x0008,
	ICE_TX_DESC_CMD_IIPT_IPV6		= 0x0020, /* 2 BITS */
	ICE_TX_DESC_CMD_IIPT_IPV4		= 0x0040, /* 2 BITS */
	ICE_TX_DESC_CMD_IIPT_IPV4_CSUM		= 0x0060, /* 2 BITS */
	ICE_TX_DESC_CMD_L4T_EOFT_TCP		= 0x0100, /* 2 BITS */
	ICE_TX_DESC_CMD_L4T_EOFT_UDP		= 0x0300, /* 2 BITS */
};

#define ICE_TXD_QW1_OFFSET_S	16
#define ICE_TXD_QW1_OFFSET_M	(0x3FFFFULL << ICE_TXD_QW1_OFFSET_S)

enum ice_tx_desc_len_fields {
	/* Note: These are predefined bit offsets */
	ICE_TX_DESC_LEN_MACLEN_S	= 0, /* 7 BITS */
	ICE_TX_DESC_LEN_IPLEN_S		= 7, /* 7 BITS */
	ICE_TX_DESC_LEN_L4_LEN_S	= 14 /* 4 BITS */
};

#define ICE_TXD_QW1_MACLEN_M (0x7FUL << ICE_TX_DESC_LEN_MACLEN_S)
#define ICE_TXD_QW1_IPLEN_M  (0x7FUL << ICE_TX_DESC_LEN_IPLEN_S)
#define ICE_TXD_QW1_L4LEN_M  (0xFUL << ICE_TX_DESC_LEN_L4_LEN_S)

/* Tx descriptor field limits in bytes */
#define ICE_TXD_MACLEN_MAX ((ICE_TXD_QW1_MACLEN_M >> \
			     ICE_TX_DESC_LEN_MACLEN_S) * ICE_BYTES_PER_WORD)
#define ICE_TXD_IPLEN_MAX ((ICE_TXD_QW1_IPLEN_M >> \
			    ICE_TX_DESC_LEN_IPLEN_S) * ICE_BYTES_PER_DWORD)
#define ICE_TXD_L4LEN_MAX ((ICE_TXD_QW1_L4LEN_M >> \
			    ICE_TX_DESC_LEN_L4_LEN_S) * ICE_BYTES_PER_DWORD)

#define ICE_TXD_QW1_TX_BUF_SZ_S	34
#define ICE_TXD_QW1_L2TAG1_S	48
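
/* Illustrative sketch: how the QW1 shift values above are combined into
 * cmd_type_offset_bsz for a data descriptor.  The helper and parameter
 * names are hypothetical; td_offset is expected to already be expressed
 * in the MACLEN/IPLEN/L4LEN units defined above.
 */
static inline __le64
ice_example_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size,
		       u64 td_tag)
{
	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			   (td_cmd << ICE_TXD_QW1_CMD_S) |
			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag << ICE_TXD_QW1_L2TAG1_S));
}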

/* Context descriptors */
struct ice_tx_ctx_desc {
	__le32 tunneling_params;
	__le16 l2tag2;
	__le16 rsvd;
	__le64 qw1;
};

#define ICE_TXD_CTX_QW1_CMD_S	4
#define ICE_TXD_CTX_QW1_CMD_M	(0x7FUL << ICE_TXD_CTX_QW1_CMD_S)

#define ICE_TXD_CTX_QW1_TSO_LEN_S	30
#define ICE_TXD_CTX_QW1_TSO_LEN_M	\
			(0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S)

#define ICE_TXD_CTX_QW1_MSS_S	50

enum ice_tx_ctx_desc_cmd_bits {
	ICE_TX_CTX_DESC_TSO		= 0x01,
	ICE_TX_CTX_DESC_TSYN		= 0x02,
	ICE_TX_CTX_DESC_IL2TAG2		= 0x04,
	ICE_TX_CTX_DESC_IL2TAG2_IL2H	= 0x08,
	ICE_TX_CTX_DESC_SWTCH_NOTAG	= 0x00,
	ICE_TX_CTX_DESC_SWTCH_UPLINK	= 0x10,
	ICE_TX_CTX_DESC_SWTCH_LOCAL	= 0x20,
	ICE_TX_CTX_DESC_SWTCH_VSI	= 0x30,
	ICE_TX_CTX_DESC_RESERVED	= 0x40
};
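
/* Illustrative sketch: a TSO context descriptor carries the command bits,
 * the total L4 payload length and the MSS in qw1, alongside the CTX
 * dtype.  The helper and parameter names are hypothetical.
 */
static inline __le64
ice_example_build_tso_ctx_qw1(u32 paylen, u16 mss)
{
	u64 qw1 = ICE_TX_DESC_DTYPE_CTX;

	qw1 |= (u64)ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S;
	qw1 |= (u64)paylen << ICE_TXD_CTX_QW1_TSO_LEN_S;
	qw1 |= (u64)mss << ICE_TXD_CTX_QW1_MSS_S;

	return cpu_to_le64(qw1);
}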

#define ICE_LAN_TXQ_MAX_QGRPS	127
#define ICE_LAN_TXQ_MAX_QDIS	1023

/* Tx queue context data
 *
 * The sizes of the variables may be larger than needed due to crossing byte
 * boundaries. If a field's width is not set correctly, bits can be shifted
 * off the top of the variable when the field sits at the top of a byte and
 * crosses over into the next byte.
 */
struct ice_tlan_ctx {
#define ICE_TLAN_CTX_BASE_S	7
	u64 base;		/* base is defined in 128-byte units */
	u8  port_num;
	u16 cgd_num;		/* bigger than needed, see above for reason */
	u8  pf_num;
	u16 vmvf_num;
	u8  vmvf_type;
#define ICE_TLAN_CTX_VMVF_TYPE_VMQ	1
#define ICE_TLAN_CTX_VMVF_TYPE_PF	2
	u16 src_vsi;
	u8  tsyn_ena;
	u8  alt_vlan;
	u16 cpuid;		/* bigger than needed, see above for reason */
	u8  wb_mode;
	u8  tphrd_desc;
	u8  tphrd;
	u8  tphwr_desc;
	u16 cmpq_id;
	u16 qnum_in_func;
	u8  itr_notification_mode;
	u8  adjust_prof_id;
	u32 qlen;		/* bigger than needed, see above for reason */
	u8  quanta_prof_idx;
	u8  tso_ena;
	u16 tso_qnum;
	u8  legacy_int;
	u8  drop_ena;
	u8  cache_prof_idx;
	u8  pkt_shaper_prof_idx;
	u8  int_q_state;	/* width not needed - internal do not write */
};

/* macro to make the table lines short */
#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
	{	PTYPE, \
		1, \
		ICE_RX_PTYPE_OUTER_##OUTER_IP, \
		ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \
		ICE_RX_PTYPE_##OUTER_FRAG, \
		ICE_RX_PTYPE_TUNNEL_##T, \
		ICE_RX_PTYPE_TUNNEL_END_##TE, \
		ICE_RX_PTYPE_##TEF, \
		ICE_RX_PTYPE_INNER_PROT_##I, \
		ICE_RX_PTYPE_PAYLOAD_LAYER_##PL }

#define ICE_PTT_UNUSED_ENTRY(PTYPE) { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/* shorter macros make the table fit but are terse */
#define ICE_RX_PTYPE_NOF		ICE_RX_PTYPE_NOT_FRAG

/* Lookup table mapping the HW PTYPE to the bit field for decoding */
static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
	/* L2 Packet types */
	ICE_PTT_UNUSED_ENTRY(0),
	ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
};

static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
{
	return ice_ptype_lkup[ptype];
}
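
/* Illustrative sketch: how a consumer might use the decode helper, e.g.
 * when deciding whether checksum results apply to an inner TCP segment.
 * The helper name is hypothetical, and the caller must ensure ptype
 * indexes a populated entry of the (abridged) table above.
 */
static inline bool ice_example_ptype_is_tcp(u16 ptype)
{
	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

	return decoded.known &&
	       decoded.inner_prot == ICE_RX_PTYPE_INNER_PROT_TCP;
}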
#endif /* _ICE_LAN_TX_RX_H_ */