1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_flow.h"
6 #include <net/gre.h>
7 
8 /* Describe properties of a protocol header field */
9 struct ice_flow_field_info {
10 	enum ice_flow_seg_hdr hdr;
11 	s16 off;	/* Offset from start of a protocol header, in bits */
12 	u16 size;	/* Size of fields in bits */
13 	u16 mask;	/* 16-bit mask for field */
14 };
15 
16 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
17 	.hdr = _hdr, \
18 	.off = (_offset_bytes) * BITS_PER_BYTE, \
19 	.size = (_size_bytes) * BITS_PER_BYTE, \
20 	.mask = 0, \
21 }
22 
23 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
24 	.hdr = _hdr, \
25 	.off = (_offset_bytes) * BITS_PER_BYTE, \
26 	.size = (_size_bytes) * BITS_PER_BYTE, \
27 	.mask = _mask, \
28 }
29 
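/* For example, the ICE_FLOW_FIELD_IDX_TCP_DST_PORT entry in the table below,
 * ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)), describes a
 * 16-bit field located 16 bits (two bytes) into the TCP header, with no mask.
 */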
30 /* Table containing properties of supported protocol header fields */
31 static const
32 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
33 	/* Ether */
34 	/* ICE_FLOW_FIELD_IDX_ETH_DA */
35 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
36 	/* ICE_FLOW_FIELD_IDX_ETH_SA */
37 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
38 	/* ICE_FLOW_FIELD_IDX_S_VLAN */
39 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, sizeof(__be16)),
40 	/* ICE_FLOW_FIELD_IDX_C_VLAN */
41 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, sizeof(__be16)),
42 	/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
43 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, sizeof(__be16)),
44 	/* IPv4 / IPv6 */
45 	/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
46 	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, 1, 0x00fc),
47 	/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
48 	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, 1, 0x0ff0),
49 	/* ICE_FLOW_FIELD_IDX_IPV4_TTL */
50 	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0xff00),
51 	/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
52 	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0x00ff),
53 	/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
54 	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0x00ff),
55 	/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
56 	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0xff00),
57 	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
58 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
59 	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
60 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, sizeof(struct in_addr)),
61 	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
62 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
63 	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
64 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
65 	/* Transport */
66 	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
67 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
68 	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
69 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)),
70 	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
71 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, sizeof(__be16)),
72 	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
73 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)),
74 	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
75 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
76 	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
77 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
78 	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
79 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, 1),
80 	/* ARP */
81 	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
82 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, sizeof(struct in_addr)),
83 	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
84 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, sizeof(struct in_addr)),
85 	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
86 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
87 	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
88 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
89 	/* ICE_FLOW_FIELD_IDX_ARP_OP */
90 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, sizeof(__be16)),
91 	/* ICMP */
92 	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
93 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, 1),
94 	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
95 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, 1),
96 	/* GRE */
97 	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
98 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
99 			  sizeof_field(struct gre_full_hdr, key)),
100 	/* GTP */
101 	/* ICE_FLOW_FIELD_IDX_GTPC_TEID */
102 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12, sizeof(__be32)),
103 	/* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
104 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12, sizeof(__be32)),
105 	/* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
106 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12, sizeof(__be32)),
107 	/* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
108 	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22, sizeof(__be16),
109 			      0x3f00),
110 	/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
111 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, sizeof(__be32)),
112 	/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
113 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, sizeof(__be32)),
114 	/* PPPoE */
115 	/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
116 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2, sizeof(__be16)),
117 	/* PFCP */
118 	/* ICE_FLOW_FIELD_IDX_PFCP_SEID */
119 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12, sizeof(__be64)),
120 	/* L2TPv3 */
121 	/* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
122 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0, sizeof(__be32)),
123 	/* ESP */
124 	/* ICE_FLOW_FIELD_IDX_ESP_SPI */
125 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0, sizeof(__be32)),
126 	/* AH */
127 	/* ICE_FLOW_FIELD_IDX_AH_SPI */
128 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4, sizeof(__be32)),
129 	/* NAT_T_ESP */
130 	/* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
131 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, sizeof(__be32)),
132 };
133 
134 /* Bitmaps indicating relevant packet types for a particular protocol header
135  *
136  * Packet types for packets with an Outer/First/Single MAC header
137  */
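/* Note: each ice_ptypes_* array below is consumed as a bitmap of
 * ICE_FLOW_PTYPE_MAX packet types: ice_flow_proc_seg_hdrs() casts the u32
 * words to (const unsigned long *) and ANDs them into params->ptypes, so bit
 * N of 32-bit word W corresponds to packet type (W * 32 + N) (assuming a
 * little-endian host, where the cast preserves bit ordering).
 */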
138 static const u32 ice_ptypes_mac_ofos[] = {
139 	0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
140 	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
141 	0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
142 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
143 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
144 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
145 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
146 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
147 };
148 
149 /* Packet types for packets with an Innermost/Last MAC VLAN header */
150 static const u32 ice_ptypes_macvlan_il[] = {
151 	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
152 	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
153 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
154 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
155 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
156 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
157 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
158 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
159 };
160 
161 /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
162  * include IPv4 other PTYPEs
163  */
164 static const u32 ice_ptypes_ipv4_ofos[] = {
165 	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
166 	0x00000000, 0x00000155, 0x00000000, 0x00000000,
167 	0x00000000, 0x000FC000, 0x00000000, 0x00000000,
168 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
169 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
170 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
171 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
172 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
173 };
174 
175 /* Packet types for packets with an Outer/First/Single IPv4 header, includes
176  * IPv4 other PTYPEs
177  */
178 static const u32 ice_ptypes_ipv4_ofos_all[] = {
179 	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
180 	0x00000000, 0x00000155, 0x00000000, 0x00000000,
181 	0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
182 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
183 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
184 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
185 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
186 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
187 };
188 
189 /* Packet types for packets with an Innermost/Last IPv4 header */
190 static const u32 ice_ptypes_ipv4_il[] = {
191 	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
192 	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
193 	0x00000000, 0x00000000, 0x001FF800, 0x00000000,
194 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
195 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
196 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
197 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
198 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
199 };
200 
201 /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
202  * include IPv6 other PTYPEs
203  */
204 static const u32 ice_ptypes_ipv6_ofos[] = {
205 	0x00000000, 0x00000000, 0x77000000, 0x10002000,
206 	0x00000000, 0x000002AA, 0x00000000, 0x00000000,
207 	0x00000000, 0x03F00000, 0x00000000, 0x00000000,
208 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
209 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
210 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
211 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
212 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
213 };
214 
215 /* Packet types for packets with an Outer/First/Single IPv6 header, includes
216  * IPv6 other PTYPEs
217  */
218 static const u32 ice_ptypes_ipv6_ofos_all[] = {
219 	0x00000000, 0x00000000, 0x77000000, 0x10002000,
220 	0x00000000, 0x000002AA, 0x00000000, 0x00000000,
221 	0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
222 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
223 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
224 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
225 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
226 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
227 };
228 
229 /* Packet types for packets with an Innermost/Last IPv6 header */
230 static const u32 ice_ptypes_ipv6_il[] = {
231 	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
232 	0x00000770, 0x00000000, 0x00000000, 0x00000000,
233 	0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
234 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
235 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
236 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
237 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
238 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
239 };
240 
241 /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
242 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
243 	0x10C00000, 0x04000800, 0x00000000, 0x00000000,
244 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
245 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
246 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
247 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
248 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
249 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
250 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
251 };
252 
253 /* Packet types for packets with an Outermost/First ARP header */
254 static const u32 ice_ptypes_arp_of[] = {
255 	0x00000800, 0x00000000, 0x00000000, 0x00000000,
256 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
257 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
258 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
259 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
260 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
261 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
262 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
263 };
264 
265 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
266 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
267 	0x60000000, 0x18043008, 0x80000002, 0x6010c021,
268 	0x00000008, 0x00000000, 0x00000000, 0x00000000,
269 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
271 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
272 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
273 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
274 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
275 };
276 
277 /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
278 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
279 	0x00000000, 0x00000000, 0x43000000, 0x10002000,
280 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
281 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
282 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
283 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
284 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
285 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
286 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
287 };
288 
289 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
290 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
291 	0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
292 	0x00000430, 0x00000000, 0x00000000, 0x00000000,
293 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
294 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
295 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
296 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
297 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
298 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
299 };
300 
301 /* UDP Packet types for non-tunneled packets or tunneled
302  * packets with inner UDP.
303  */
304 static const u32 ice_ptypes_udp_il[] = {
305 	0x81000000, 0x20204040, 0x04000010, 0x80810102,
306 	0x00000040, 0x00000000, 0x00000000, 0x00000000,
307 	0x00000000, 0x00410000, 0x90842000, 0x00000007,
308 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
309 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
310 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
311 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
312 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
313 };
314 
315 /* Packet types for packets with an Innermost/Last TCP header */
316 static const u32 ice_ptypes_tcp_il[] = {
317 	0x04000000, 0x80810102, 0x10000040, 0x02040408,
318 	0x00000102, 0x00000000, 0x00000000, 0x00000000,
319 	0x00000000, 0x00820000, 0x21084000, 0x00000000,
320 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
321 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
322 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
323 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
324 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
325 };
326 
327 /* Packet types for packets with an Innermost/Last SCTP header */
328 static const u32 ice_ptypes_sctp_il[] = {
329 	0x08000000, 0x01020204, 0x20000081, 0x04080810,
330 	0x00000204, 0x00000000, 0x00000000, 0x00000000,
331 	0x00000000, 0x01040000, 0x00000000, 0x00000000,
332 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
333 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
334 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
335 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
336 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
337 };
338 
339 /* Packet types for packets with an Outermost/First ICMP header */
340 static const u32 ice_ptypes_icmp_of[] = {
341 	0x10000000, 0x00000000, 0x00000000, 0x00000000,
342 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
343 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
344 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
345 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
346 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
347 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
348 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
349 };
350 
351 /* Packet types for packets with an Innermost/Last ICMP header */
352 static const u32 ice_ptypes_icmp_il[] = {
353 	0x00000000, 0x02040408, 0x40000102, 0x08101020,
354 	0x00000408, 0x00000000, 0x00000000, 0x00000000,
355 	0x00000000, 0x00000000, 0x42108000, 0x00000000,
356 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
357 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
359 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
360 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
361 };
362 
363 /* Packet types for packets with an Outermost/First GRE header */
364 static const u32 ice_ptypes_gre_of[] = {
365 	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
366 	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
367 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
368 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
369 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
370 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
371 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
372 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
373 };
374 
375 /* Packet types for packets with an Innermost/Last MAC header */
376 static const u32 ice_ptypes_mac_il[] = {
377 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
378 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
379 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
380 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
381 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
382 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
383 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
384 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
385 };
386 
387 /* Packet types for GTPC */
388 static const u32 ice_ptypes_gtpc[] = {
389 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
390 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
391 	0x00000000, 0x00000000, 0x00000180, 0x00000000,
392 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
393 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
394 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
395 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
396 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
397 };
398 
399 /* Packet types for GTPC with TEID */
400 static const u32 ice_ptypes_gtpc_tid[] = {
401 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
402 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
403 	0x00000000, 0x00000000, 0x00000060, 0x00000000,
404 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
405 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
406 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
407 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
408 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
409 };
410 
411 /* Packet types for GTPU */
412 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
413 	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
414 	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
415 	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
416 	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
417 	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
418 	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
419 	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
420 	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
421 	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
422 	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
423 	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
424 	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
425 	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
426 	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
427 	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
428 	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
429 	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
430 	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
431 	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
432 	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
433 };
434 
435 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
436 	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
437 	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
438 	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
439 	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
440 	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
441 	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
442 	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
443 	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
444 	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
445 	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
446 	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
447 	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
448 	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
449 	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
450 	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
451 	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
452 	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
453 	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
454 	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
455 	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
456 };
457 
458 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
459 	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
460 	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
461 	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
462 	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
463 	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
464 	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
465 	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
466 	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
467 	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
468 	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
469 	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
470 	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
471 	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
472 	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
473 	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
474 	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
475 	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
476 	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
477 	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
478 	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
479 };
480 
481 static const u32 ice_ptypes_gtpu[] = {
482 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
483 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
484 	0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
485 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
486 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
487 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
488 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
489 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
490 };
491 
492 /* Packet types for PPPoE */
493 static const u32 ice_ptypes_pppoe[] = {
494 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
495 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
496 	0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
497 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
498 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
499 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
500 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
501 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
502 };
503 
504 /* Packet types for packets with PFCP NODE header */
505 static const u32 ice_ptypes_pfcp_node[] = {
506 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
507 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
508 	0x00000000, 0x00000000, 0x80000000, 0x00000002,
509 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
510 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
511 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
512 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
513 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
514 };
515 
516 /* Packet types for packets with PFCP SESSION header */
517 static const u32 ice_ptypes_pfcp_session[] = {
518 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
519 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
520 	0x00000000, 0x00000000, 0x00000000, 0x00000005,
521 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
522 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
523 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
524 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
525 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
526 };
527 
528 /* Packet types for L2TPv3 */
529 static const u32 ice_ptypes_l2tpv3[] = {
530 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
531 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
532 	0x00000000, 0x00000000, 0x00000000, 0x00000300,
533 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
534 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
535 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
536 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
537 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
538 };
539 
540 /* Packet types for ESP */
541 static const u32 ice_ptypes_esp[] = {
542 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
543 	0x00000000, 0x00000003, 0x00000000, 0x00000000,
544 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
545 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
546 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
547 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
548 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
549 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
550 };
551 
552 /* Packet types for AH */
553 static const u32 ice_ptypes_ah[] = {
554 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
555 	0x00000000, 0x0000000C, 0x00000000, 0x00000000,
556 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
557 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
558 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
559 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
560 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
561 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
562 };
563 
564 /* Packet types for packets with NAT_T ESP header */
565 static const u32 ice_ptypes_nat_t_esp[] = {
566 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
567 	0x00000000, 0x00000030, 0x00000000, 0x00000000,
568 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
569 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
570 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
571 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
572 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
573 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
574 };
575 
576 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
577 	0x00000846, 0x00000000, 0x00000000, 0x00000000,
578 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
579 	0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
580 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
581 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
582 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
583 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
584 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
585 };
586 
587 /* Parameters and info used during the creation of a flow profile */
588 struct ice_flow_prof_params {
589 	enum ice_block blk;
590 	u16 entry_length; /* # of bytes formatted entry will require */
591 	u8 es_cnt;
592 	struct ice_flow_prof *prof;
593 
594 	/* For ACL, es[0] will hold the data of ICE_RX_MDID_PKT_FLAGS_15_0,
595 	 * which gives us the direction flags.
596 	 */
597 	struct ice_fv_word es[ICE_MAX_FV_WORDS];
598 	/* attributes can be used to add attributes to a particular PTYPE */
599 	const struct ice_ptype_attributes *attr;
600 	u16 attr_cnt;
601 
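	/* mask[i] applies to the extraction word described by es[i] */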
602 	u16 mask[ICE_MAX_FV_WORDS];
603 	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
604 };
605 
606 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
607 	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
608 	ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
609 	ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
610 	ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
611 	ICE_FLOW_SEG_HDR_NAT_T_ESP)
612 
613 #define ICE_FLOW_SEG_HDRS_L3_MASK	\
614 	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP)
615 #define ICE_FLOW_SEG_HDRS_L4_MASK	\
616 	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
617 	 ICE_FLOW_SEG_HDR_SCTP)
618 /* mask for L4 protocols that are NOT part of IPv4/6 OTHER PTYPE groups */
619 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER	\
620 	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
621 
622 /**
623  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
624  * @segs: array of one or more packet segments that describe the flow
625  * @segs_cnt: number of packet segments provided
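 *
 * A segment may specify at most one L3 header (IPv4, IPv6, or ARP) and at
 * most one L4 header (ICMP, TCP, UDP, or SCTP). For example, a segment with
 * both ICE_FLOW_SEG_HDR_IPV4 and ICE_FLOW_SEG_HDR_IPV6 set is rejected.
 *
 * Return: 0 on success, or -EINVAL if a segment mixes multiple L3 or L4
 * headers.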
626  */
627 static int ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
628 {
629 	u8 i;
630 
631 	for (i = 0; i < segs_cnt; i++) {
632 		/* Multiple L3 headers */
633 		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
634 		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
635 			return -EINVAL;
636 
637 		/* Multiple L4 headers */
638 		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
639 		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
640 			return -EINVAL;
641 	}
642 
643 	return 0;
644 }
645 
646 /* Sizes of fixed known protocol headers without header options */
647 #define ICE_FLOW_PROT_HDR_SZ_MAC	14
648 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
649 #define ICE_FLOW_PROT_HDR_SZ_IPV4	20
650 #define ICE_FLOW_PROT_HDR_SZ_IPV6	40
651 #define ICE_FLOW_PROT_HDR_SZ_ARP	28
652 #define ICE_FLOW_PROT_HDR_SZ_ICMP	8
653 #define ICE_FLOW_PROT_HDR_SZ_TCP	20
654 #define ICE_FLOW_PROT_HDR_SZ_UDP	8
655 #define ICE_FLOW_PROT_HDR_SZ_SCTP	12
656 
657 /**
658  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
659  * @params: information about the flow to be processed
660  * @seg: index of packet segment whose header size is to be determined
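 *
 * Returns the sum of the fixed header sizes selected by the segment, e.g. a
 * segment with VLAN, IPv6 and UDP headers yields 16 + 40 + 8 = 64 bytes.
 * Returns 0 if an L4 header is requested without an accompanying L3 header.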
661  */
662 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
663 {
664 	u16 sz;
665 
666 	/* L2 headers */
667 	sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
668 		ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
669 
670 	/* L3 headers */
671 	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
672 		sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
673 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
674 		sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
675 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
676 		sz += ICE_FLOW_PROT_HDR_SZ_ARP;
677 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
678 		/* An L3 header is required if L4 is specified */
679 		return 0;
680 
681 	/* L4 headers */
682 	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
683 		sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
684 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
685 		sz += ICE_FLOW_PROT_HDR_SZ_TCP;
686 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
687 		sz += ICE_FLOW_PROT_HDR_SZ_UDP;
688 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
689 		sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
690 
691 	return sz;
692 }
693 
694 /**
695  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
696  * @params: information about the flow to be processed
697  *
698  * This function identifies the packet types associated with the protocol
699  * headers present in the packet segments of the specified flow profile.
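 *
 * For example, an outermost segment with ICE_FLOW_SEG_HDR_IPV4 |
 * ICE_FLOW_SEG_HDR_UDP narrows params->ptypes to the intersection of
 * ice_ptypes_ipv4_ofos and ice_ptypes_udp_il (minus the PPPoE and PFCP
 * packet types removed by the else branches below).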
700  */
701 static int ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
702 {
703 	struct ice_flow_prof *prof;
704 	u8 i;
705 
706 	memset(params->ptypes, 0xff, sizeof(params->ptypes));
707 
708 	prof = params->prof;
709 
710 	for (i = 0; i < params->prof->segs_cnt; i++) {
711 		const unsigned long *src;
712 		u32 hdrs;
713 
714 		hdrs = prof->segs[i].hdrs;
715 
716 		if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
717 			src = !i ? (const unsigned long *)ice_ptypes_mac_ofos :
718 				(const unsigned long *)ice_ptypes_mac_il;
719 			bitmap_and(params->ptypes, params->ptypes, src,
720 				   ICE_FLOW_PTYPE_MAX);
721 		}
722 
723 		if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
724 			src = (const unsigned long *)ice_ptypes_macvlan_il;
725 			bitmap_and(params->ptypes, params->ptypes, src,
726 				   ICE_FLOW_PTYPE_MAX);
727 		}
728 
729 		if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
730 			bitmap_and(params->ptypes, params->ptypes,
731 				   (const unsigned long *)ice_ptypes_arp_of,
732 				   ICE_FLOW_PTYPE_MAX);
733 		}
734 
735 		if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
736 		    (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
737 			src = i ? (const unsigned long *)ice_ptypes_ipv4_il :
738 				(const unsigned long *)ice_ptypes_ipv4_ofos_all;
739 			bitmap_and(params->ptypes, params->ptypes, src,
740 				   ICE_FLOW_PTYPE_MAX);
741 		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
742 			   (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
743 			src = i ? (const unsigned long *)ice_ptypes_ipv6_il :
744 				(const unsigned long *)ice_ptypes_ipv6_ofos_all;
745 			bitmap_and(params->ptypes, params->ptypes, src,
746 				   ICE_FLOW_PTYPE_MAX);
747 		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
748 			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
749 			src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos_no_l4 :
750 				(const unsigned long *)ice_ptypes_ipv4_il_no_l4;
751 			bitmap_and(params->ptypes, params->ptypes, src,
752 				   ICE_FLOW_PTYPE_MAX);
753 		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
754 			src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
755 				(const unsigned long *)ice_ptypes_ipv4_il;
756 			bitmap_and(params->ptypes, params->ptypes, src,
757 				   ICE_FLOW_PTYPE_MAX);
758 		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
759 			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
760 			src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos_no_l4 :
761 				(const unsigned long *)ice_ptypes_ipv6_il_no_l4;
762 			bitmap_and(params->ptypes, params->ptypes, src,
763 				   ICE_FLOW_PTYPE_MAX);
764 		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
765 			src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
766 				(const unsigned long *)ice_ptypes_ipv6_il;
767 			bitmap_and(params->ptypes, params->ptypes, src,
768 				   ICE_FLOW_PTYPE_MAX);
769 		}
770 
771 		if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
772 			src = (const unsigned long *)ice_ptypes_mac_non_ip_ofos;
773 			bitmap_and(params->ptypes, params->ptypes, src,
774 				   ICE_FLOW_PTYPE_MAX);
775 		} else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
776 			src = (const unsigned long *)ice_ptypes_pppoe;
777 			bitmap_and(params->ptypes, params->ptypes, src,
778 				   ICE_FLOW_PTYPE_MAX);
779 		} else {
780 			src = (const unsigned long *)ice_ptypes_pppoe;
781 			bitmap_andnot(params->ptypes, params->ptypes, src,
782 				      ICE_FLOW_PTYPE_MAX);
783 		}
784 
785 		if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
786 			src = (const unsigned long *)ice_ptypes_udp_il;
787 			bitmap_and(params->ptypes, params->ptypes, src,
788 				   ICE_FLOW_PTYPE_MAX);
789 		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
790 			bitmap_and(params->ptypes, params->ptypes,
791 				   (const unsigned long *)ice_ptypes_tcp_il,
792 				   ICE_FLOW_PTYPE_MAX);
793 		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
794 			src = (const unsigned long *)ice_ptypes_sctp_il;
795 			bitmap_and(params->ptypes, params->ptypes, src,
796 				   ICE_FLOW_PTYPE_MAX);
797 		}
798 
799 		if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
800 			src = !i ? (const unsigned long *)ice_ptypes_icmp_of :
801 				(const unsigned long *)ice_ptypes_icmp_il;
802 			bitmap_and(params->ptypes, params->ptypes, src,
803 				   ICE_FLOW_PTYPE_MAX);
804 		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
805 			if (!i) {
806 				src = (const unsigned long *)ice_ptypes_gre_of;
807 				bitmap_and(params->ptypes, params->ptypes,
808 					   src, ICE_FLOW_PTYPE_MAX);
809 			}
810 		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
811 			src = (const unsigned long *)ice_ptypes_gtpc;
812 			bitmap_and(params->ptypes, params->ptypes, src,
813 				   ICE_FLOW_PTYPE_MAX);
814 		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
815 			src = (const unsigned long *)ice_ptypes_gtpc_tid;
816 			bitmap_and(params->ptypes, params->ptypes, src,
817 				   ICE_FLOW_PTYPE_MAX);
818 		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
819 			src = (const unsigned long *)ice_ptypes_gtpu;
820 			bitmap_and(params->ptypes, params->ptypes, src,
821 				   ICE_FLOW_PTYPE_MAX);
822 
823 			/* Attributes for GTP packet with downlink */
824 			params->attr = ice_attr_gtpu_down;
825 			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
826 		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
827 			src = (const unsigned long *)ice_ptypes_gtpu;
828 			bitmap_and(params->ptypes, params->ptypes, src,
829 				   ICE_FLOW_PTYPE_MAX);
830 
831 			/* Attributes for GTP packet with uplink */
832 			params->attr = ice_attr_gtpu_up;
833 			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
834 		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
835 			src = (const unsigned long *)ice_ptypes_gtpu;
836 			bitmap_and(params->ptypes, params->ptypes, src,
837 				   ICE_FLOW_PTYPE_MAX);
838 
839 			/* Attributes for GTP packet with Extension Header */
840 			params->attr = ice_attr_gtpu_eh;
841 			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
842 		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
843 			src = (const unsigned long *)ice_ptypes_gtpu;
844 			bitmap_and(params->ptypes, params->ptypes, src,
845 				   ICE_FLOW_PTYPE_MAX);
846 		} else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
847 			src = (const unsigned long *)ice_ptypes_l2tpv3;
848 			bitmap_and(params->ptypes, params->ptypes, src,
849 				   ICE_FLOW_PTYPE_MAX);
850 		} else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
851 			src = (const unsigned long *)ice_ptypes_esp;
852 			bitmap_and(params->ptypes, params->ptypes, src,
853 				   ICE_FLOW_PTYPE_MAX);
854 		} else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
855 			src = (const unsigned long *)ice_ptypes_ah;
856 			bitmap_and(params->ptypes, params->ptypes, src,
857 				   ICE_FLOW_PTYPE_MAX);
858 		} else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
859 			src = (const unsigned long *)ice_ptypes_nat_t_esp;
860 			bitmap_and(params->ptypes, params->ptypes, src,
861 				   ICE_FLOW_PTYPE_MAX);
862 		}
863 
864 		if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
865 			if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
866 				src = (const unsigned long *)ice_ptypes_pfcp_node;
867 			else
868 				src = (const unsigned long *)ice_ptypes_pfcp_session;
869 
870 			bitmap_and(params->ptypes, params->ptypes, src,
871 				   ICE_FLOW_PTYPE_MAX);
872 		} else {
873 			src = (const unsigned long *)ice_ptypes_pfcp_node;
874 			bitmap_andnot(params->ptypes, params->ptypes, src,
875 				      ICE_FLOW_PTYPE_MAX);
876 
877 			src = (const unsigned long *)ice_ptypes_pfcp_session;
878 			bitmap_andnot(params->ptypes, params->ptypes, src,
879 				      ICE_FLOW_PTYPE_MAX);
880 		}
881 	}
882 
883 	return 0;
884 }
885 
886 /**
887  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
888  * @hw: pointer to the HW struct
889  * @params: information about the flow to be processed
890  * @seg: packet segment index of the field to be extracted
891  * @fld: ID of field to be extracted
892  * @match: bit field of all fields to be matched in this segment
893  *
894  * This function determines the protocol ID, offset, and size of the given
895  * field. It then allocates one or more extraction sequence entries for the
896  * given field, and fills the entries with protocol ID and offset information.
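 *
 * For example, ICE_FLOW_FIELD_IDX_IPV4_SA (bit offset 96, 32 bits wide)
 * consumes two extraction words covering byte offsets 12 and 14 of the IPv4
 * header, assuming the usual 2-byte ICE_FLOW_FV_EXTRACT_SZ.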
897  */
898 static int
899 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
900 		    u8 seg, enum ice_flow_field fld, u64 match)
901 {
902 	enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
903 	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
904 	u8 fv_words = hw->blk[params->blk].es.fvw;
905 	struct ice_flow_fld_info *flds;
906 	u16 cnt, ese_bits, i;
907 	u16 sib_mask = 0;
908 	u16 mask;
909 	u16 off;
910 
911 	flds = params->prof->segs[seg].fields;
912 
913 	switch (fld) {
914 	case ICE_FLOW_FIELD_IDX_ETH_DA:
915 	case ICE_FLOW_FIELD_IDX_ETH_SA:
916 	case ICE_FLOW_FIELD_IDX_S_VLAN:
917 	case ICE_FLOW_FIELD_IDX_C_VLAN:
918 		prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
919 		break;
920 	case ICE_FLOW_FIELD_IDX_ETH_TYPE:
921 		prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
922 		break;
923 	case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
924 		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
925 		break;
926 	case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
927 		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
928 		break;
929 	case ICE_FLOW_FIELD_IDX_IPV4_TTL:
930 	case ICE_FLOW_FIELD_IDX_IPV4_PROT:
931 		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
932 
933 		/* TTL and PROT share the same extraction seq. entry.
934 		 * Each is considered a sibling to the other in terms of sharing
935 		 * the same extraction sequence entry.
936 		 */
937 		if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
938 			sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
939 		else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
940 			sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
941 
942 		/* If the sibling field is also included, that field's
943 		 * mask needs to be included.
944 		 */
945 		if (match & BIT(sib))
946 			sib_mask = ice_flds_info[sib].mask;
947 		break;
948 	case ICE_FLOW_FIELD_IDX_IPV6_TTL:
949 	case ICE_FLOW_FIELD_IDX_IPV6_PROT:
950 		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
951 
952 		/* TTL and PROT share the same extraction seq. entry.
953 		 * Each is considered a sibling to the other in terms of sharing
954 		 * the same extraction sequence entry.
955 		 */
956 		if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
957 			sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
958 		else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
959 			sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
960 
961 		/* If the sibling field is also included, that field's
962 		 * mask needs to be included.
963 		 */
964 		if (match & BIT(sib))
965 			sib_mask = ice_flds_info[sib].mask;
966 		break;
967 	case ICE_FLOW_FIELD_IDX_IPV4_SA:
968 	case ICE_FLOW_FIELD_IDX_IPV4_DA:
969 		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
970 		break;
971 	case ICE_FLOW_FIELD_IDX_IPV6_SA:
972 	case ICE_FLOW_FIELD_IDX_IPV6_DA:
973 		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
974 		break;
975 	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
976 	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
977 	case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
978 		prot_id = ICE_PROT_TCP_IL;
979 		break;
980 	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
981 	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
982 		prot_id = ICE_PROT_UDP_IL_OR_S;
983 		break;
984 	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
985 	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
986 		prot_id = ICE_PROT_SCTP_IL;
987 		break;
988 	case ICE_FLOW_FIELD_IDX_GTPC_TEID:
989 	case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
990 	case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
991 	case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
992 	case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
993 	case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
994 		/* GTP is accessed through UDP OF protocol */
995 		prot_id = ICE_PROT_UDP_OF;
996 		break;
997 	case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
998 		prot_id = ICE_PROT_PPPOE;
999 		break;
1000 	case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1001 		prot_id = ICE_PROT_UDP_IL_OR_S;
1002 		break;
1003 	case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1004 		prot_id = ICE_PROT_L2TPV3;
1005 		break;
1006 	case ICE_FLOW_FIELD_IDX_ESP_SPI:
1007 		prot_id = ICE_PROT_ESP_F;
1008 		break;
1009 	case ICE_FLOW_FIELD_IDX_AH_SPI:
1010 		prot_id = ICE_PROT_ESP_2;
1011 		break;
1012 	case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1013 		prot_id = ICE_PROT_UDP_IL_OR_S;
1014 		break;
1015 	case ICE_FLOW_FIELD_IDX_ARP_SIP:
1016 	case ICE_FLOW_FIELD_IDX_ARP_DIP:
1017 	case ICE_FLOW_FIELD_IDX_ARP_SHA:
1018 	case ICE_FLOW_FIELD_IDX_ARP_DHA:
1019 	case ICE_FLOW_FIELD_IDX_ARP_OP:
1020 		prot_id = ICE_PROT_ARP_OF;
1021 		break;
1022 	case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1023 	case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1024 		/* ICMP type and code share the same extraction seq. entry */
1025 		prot_id = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) ?
1026 				ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1027 		sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1028 			ICE_FLOW_FIELD_IDX_ICMP_CODE :
1029 			ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1030 		break;
1031 	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1032 		prot_id = ICE_PROT_GRE_OF;
1033 		break;
1034 	default:
1035 		return -EOPNOTSUPP;
1036 	}
1037 
1038 	/* Each extraction sequence entry is a word in size, and extracts a word
1039 	 * starting at a word-aligned offset within the protocol header.
1040 	 */
1041 	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1042 
1043 	flds[fld].xtrct.prot_id = prot_id;
1044 	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1045 		ICE_FLOW_FV_EXTRACT_SZ;
1046 	flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1047 	flds[fld].xtrct.idx = params->es_cnt;
1048 	flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1049 
1050 	/* Determine the number of extraction sequence entries this field
1051 	 * consumes, accounting for its bit displacement within the first word
1052 	 */
1053 	cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
1054 			   ese_bits);
1055 
1056 	/* Fill in the extraction sequence entries needed for this field */
1057 	off = flds[fld].xtrct.off;
1058 	mask = flds[fld].xtrct.mask;
1059 	for (i = 0; i < cnt; i++) {
1060 		/* Only consume an extraction sequence entry if there is no
1061 		 * sibling field associated with this field, or the sibling entry
1062 		 * does not already extract the word shared with this field.
1063 		 */
1064 		if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1065 		    flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1066 		    flds[sib].xtrct.off != off) {
1067 			u8 idx;
1068 
1069 			/* Make sure the number of extraction sequence entries
1070 			 * required does not exceed the block's capability
1071 			 */
1072 			if (params->es_cnt >= fv_words)
1073 				return -ENOSPC;
1074 
1075 			/* some blocks require a reversed field vector layout */
1076 			if (hw->blk[params->blk].es.reverse)
1077 				idx = fv_words - params->es_cnt - 1;
1078 			else
1079 				idx = params->es_cnt;
1080 
1081 			params->es[idx].prot_id = prot_id;
1082 			params->es[idx].off = off;
1083 			params->mask[idx] = mask | sib_mask;
1084 			params->es_cnt++;
1085 		}
1086 
1087 		off += ICE_FLOW_FV_EXTRACT_SZ;
1088 	}
1089 
1090 	return 0;
1091 }
1092 
1093 /**
1094  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1095  * @hw: pointer to the HW struct
1096  * @params: information about the flow to be processed
1097  * @seg: index of packet segment whose raw fields are to be extracted
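 *
 * Raw fields are anchored at the start of the outer MAC header
 * (ICE_PROT_MAC_OF_OR_S) and, like named fields, consume one extraction word
 * per ICE_FLOW_FV_EXTRACT_SZ bytes covered.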
1098  */
1099 static int
1100 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1101 		     u8 seg)
1102 {
1103 	u16 fv_words;
1104 	u16 hdrs_sz;
1105 	u8 i;
1106 
1107 	if (!params->prof->segs[seg].raws_cnt)
1108 		return 0;
1109 
1110 	if (params->prof->segs[seg].raws_cnt >
1111 	    ARRAY_SIZE(params->prof->segs[seg].raws))
1112 		return -ENOSPC;
1113 
1114 	/* Offsets within the segment headers are not supported */
1115 	hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1116 	if (!hdrs_sz)
1117 		return -EINVAL;
1118 
1119 	fv_words = hw->blk[params->blk].es.fvw;
1120 
1121 	for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1122 		struct ice_flow_seg_fld_raw *raw;
1123 		u16 off, cnt, j;
1124 
1125 		raw = &params->prof->segs[seg].raws[i];
1126 
1127 		/* Storing extraction information */
1128 		raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1129 		raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1130 			ICE_FLOW_FV_EXTRACT_SZ;
1131 		raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1132 			BITS_PER_BYTE;
1133 		raw->info.xtrct.idx = params->es_cnt;
1134 
1135 		/* Determine the number of field vector entries this raw field
1136 		 * consumes.
1137 		 */
1138 		cnt = DIV_ROUND_UP(raw->info.xtrct.disp +
1139 				   (raw->info.src.last * BITS_PER_BYTE),
1140 				   (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
1141 		off = raw->info.xtrct.off;
1142 		for (j = 0; j < cnt; j++) {
1143 			u16 idx;
1144 
1145 			/* Make sure the number of extraction sequence entries
1146 			 * required does not exceed the block's capability
1147 			 */
1148 			if (params->es_cnt >= hw->blk[params->blk].es.count ||
1149 			    params->es_cnt >= ICE_MAX_FV_WORDS)
1150 				return -ENOSPC;
1151 
1152 			/* some blocks require a reversed field vector layout */
1153 			if (hw->blk[params->blk].es.reverse)
1154 				idx = fv_words - params->es_cnt - 1;
1155 			else
1156 				idx = params->es_cnt;
1157 
1158 			params->es[idx].prot_id = raw->info.xtrct.prot_id;
1159 			params->es[idx].off = off;
1160 			params->es_cnt++;
1161 			off += ICE_FLOW_FV_EXTRACT_SZ;
1162 		}
1163 	}
1164 
1165 	return 0;
1166 }
1167 
1168 /**
1169  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1170  * @hw: pointer to the HW struct
1171  * @params: information about the flow to be processed
1172  *
1173  * This function iterates through all matched fields in the given segments, and
1174  * creates an extraction sequence for the fields.
1175  */
1176 static int
1177 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1178 			  struct ice_flow_prof_params *params)
1179 {
1180 	struct ice_flow_prof *prof = params->prof;
1181 	int status = 0;
1182 	u8 i;
1183 
1184 	for (i = 0; i < prof->segs_cnt; i++) {
1185 		u64 match = params->prof->segs[i].match;
1186 		enum ice_flow_field j;
1187 
1188 		for_each_set_bit(j, (unsigned long *)&match,
1189 				 ICE_FLOW_FIELD_IDX_MAX) {
1190 			status = ice_flow_xtract_fld(hw, params, i, j, match);
1191 			if (status)
1192 				return status;
1193 			clear_bit(j, (unsigned long *)&match);
1194 		}
1195 
1196 		/* Process raw matching bytes */
1197 		status = ice_flow_xtract_raws(hw, params, i);
1198 		if (status)
1199 			return status;
1200 	}
1201 
1202 	return status;
1203 }
1204 
1205 /**
1206  * ice_flow_proc_segs - process all packet segments associated with a profile
1207  * @hw: pointer to the HW struct
1208  * @params: information about the flow to be processed
1209  */
1210 static int
1211 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1212 {
1213 	int status;
1214 
1215 	status = ice_flow_proc_seg_hdrs(params);
1216 	if (status)
1217 		return status;
1218 
1219 	status = ice_flow_create_xtrct_seq(hw, params);
1220 	if (status)
1221 		return status;
1222 
1223 	switch (params->blk) {
1224 	case ICE_BLK_FD:
1225 	case ICE_BLK_RSS:
1226 		status = 0;
1227 		break;
1228 	default:
1229 		return -EOPNOTSUPP;
1230 	}
1231 
1232 	return status;
1233 }
1234 
1235 #define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001
1236 #define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002
1237 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR	0x00000004
1238 
1239 /**
1240  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1241  * @hw: pointer to the HW struct
1242  * @blk: classification stage
1243  * @dir: flow direction
1244  * @segs: array of one or more packet segments that describe the flow
1245  * @segs_cnt: number of packet segments provided
1246  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1247  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
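 *
 * For example, passing ICE_FLOW_FIND_PROF_CHK_FLDS | ICE_FLOW_FIND_PROF_CHK_VSI
 * requires the matched fields of every segment and the profile's VSI
 * association to match in addition to the protocol headers, while
 * ICE_FLOW_FIND_PROF_NOT_CHK_DIR skips the flow-direction comparison.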
1248  */
1249 static struct ice_flow_prof *
1250 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1251 			 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1252 			 u8 segs_cnt, u16 vsi_handle, u32 conds)
1253 {
1254 	struct ice_flow_prof *p, *prof = NULL;
1255 
1256 	mutex_lock(&hw->fl_profs_locks[blk]);
1257 	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
1258 		if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1259 		    segs_cnt && segs_cnt == p->segs_cnt) {
1260 			u8 i;
1261 
1262 			/* Check for profile-VSI association if specified */
1263 			if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1264 			    ice_is_vsi_valid(hw, vsi_handle) &&
1265 			    !test_bit(vsi_handle, p->vsis))
1266 				continue;
1267 
1268 			/* Protocol headers must be checked. Matched fields are
1269 			 * checked if specified.
1270 			 */
1271 			for (i = 0; i < segs_cnt; i++)
1272 				if (segs[i].hdrs != p->segs[i].hdrs ||
1273 				    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1274 				     segs[i].match != p->segs[i].match))
1275 					break;
1276 
1277 			/* A match is found if all segments are matched */
1278 			if (i == segs_cnt) {
1279 				prof = p;
1280 				break;
1281 			}
1282 		}
1283 	mutex_unlock(&hw->fl_profs_locks[blk]);
1284 
1285 	return prof;
1286 }
1287 
1288 /**
1289  * ice_flow_find_prof_id - Look up a profile with given profile ID
1290  * @hw: pointer to the HW struct
1291  * @blk: classification stage
1292  * @prof_id: unique ID to identify this flow profile
1293  */
1294 static struct ice_flow_prof *
1295 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1296 {
1297 	struct ice_flow_prof *p;
1298 
1299 	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
1300 		if (p->id == prof_id)
1301 			return p;
1302 
1303 	return NULL;
1304 }
1305 
1306 /**
1307  * ice_flow_rem_entry_sync - Remove a flow entry
1308  * @hw: pointer to the HW struct
1309  * @blk: classification stage
1310  * @entry: flow entry to be removed
1311  */
1312 static int
1313 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
1314 			struct ice_flow_entry *entry)
1315 {
1316 	if (!entry)
1317 		return -EINVAL;
1318 
1319 	list_del(&entry->l_entry);
1320 
1321 	devm_kfree(ice_hw_to_dev(hw), entry->entry);
1322 	devm_kfree(ice_hw_to_dev(hw), entry);
1323 
1324 	return 0;
1325 }
1326 
1327 /**
1328  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1329  * @hw: pointer to the HW struct
1330  * @blk: classification stage
1331  * @dir: flow direction
1332  * @prof_id: unique ID to identify this flow profile
1333  * @segs: array of one or more packet segments that describe the flow
1334  * @segs_cnt: number of packet segments provided
1335  * @prof: stores the returned flow profile added
1336  *
1337  * Assumption: the caller has acquired the lock to the profile list
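 *
 * On success, the corresponding HW profile is programmed via ice_add_prof()
 * and the newly allocated software profile is returned through @prof.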
1338  */
1339 static int
1340 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1341 		       enum ice_flow_dir dir, u64 prof_id,
1342 		       struct ice_flow_seg_info *segs, u8 segs_cnt,
1343 		       struct ice_flow_prof **prof)
1344 {
1345 	struct ice_flow_prof_params *params;
1346 	int status;
1347 	u8 i;
1348 
1349 	if (!prof)
1350 		return -EINVAL;
1351 
1352 	params = kzalloc(sizeof(*params), GFP_KERNEL);
1353 	if (!params)
1354 		return -ENOMEM;
1355 
1356 	params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
1357 				    GFP_KERNEL);
1358 	if (!params->prof) {
1359 		status = -ENOMEM;
1360 		goto free_params;
1361 	}
1362 
1363 	/* initialize extraction sequence to all invalid (0xff) */
1364 	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1365 		params->es[i].prot_id = ICE_PROT_INVALID;
1366 		params->es[i].off = ICE_FV_OFFSET_INVAL;
1367 	}
1368 
1369 	params->blk = blk;
1370 	params->prof->id = prof_id;
1371 	params->prof->dir = dir;
1372 	params->prof->segs_cnt = segs_cnt;
1373 
1374 	/* Make a copy of the segments that need to be persistent in the flow
1375 	 * profile instance
1376 	 */
1377 	for (i = 0; i < segs_cnt; i++)
1378 		memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs));
1379 
1380 	status = ice_flow_proc_segs(hw, params);
1381 	if (status) {
1382 		ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
1383 		goto out;
1384 	}
1385 
1386 	/* Add a HW profile for this flow profile */
1387 	status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
1388 			      params->attr, params->attr_cnt, params->es,
1389 			      params->mask);
1390 	if (status) {
1391 		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1392 		goto out;
1393 	}
1394 
1395 	INIT_LIST_HEAD(&params->prof->entries);
1396 	mutex_init(&params->prof->entries_lock);
1397 	*prof = params->prof;
1398 
1399 out:
1400 	if (status)
1401 		devm_kfree(ice_hw_to_dev(hw), params->prof);
1402 free_params:
1403 	kfree(params);
1404 
1405 	return status;
1406 }
1407 
1408 /**
1409  * ice_flow_rem_prof_sync - remove a flow profile
1410  * @hw: pointer to the hardware structure
1411  * @blk: classification stage
1412  * @prof: pointer to flow profile to remove
1413  *
1414  * Assumption: the caller has acquired the lock to the profile list
1415  */
1416 static int
1417 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1418 		       struct ice_flow_prof *prof)
1419 {
1420 	int status;
1421 
1422 	/* Remove all remaining flow entries before removing the flow profile */
1423 	if (!list_empty(&prof->entries)) {
1424 		struct ice_flow_entry *e, *t;
1425 
1426 		mutex_lock(&prof->entries_lock);
1427 
1428 		list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
1429 			status = ice_flow_rem_entry_sync(hw, blk, e);
1430 			if (status)
1431 				break;
1432 		}
1433 
1434 		mutex_unlock(&prof->entries_lock);
1435 	}
1436 
1437 	/* Remove all hardware profiles associated with this flow profile */
1438 	status = ice_rem_prof(hw, blk, prof->id);
1439 	if (!status) {
1440 		list_del(&prof->l_entry);
1441 		mutex_destroy(&prof->entries_lock);
1442 		devm_kfree(ice_hw_to_dev(hw), prof);
1443 	}
1444 
1445 	return status;
1446 }
1447 
1448 /**
1449  * ice_flow_assoc_prof - associate a VSI with a flow profile
1450  * @hw: pointer to the hardware structure
1451  * @blk: classification stage
1452  * @prof: pointer to flow profile
1453  * @vsi_handle: software VSI handle
1454  *
1455  * Assumption: the caller has acquired the lock to the profile list
1456  * and the software VSI handle has been validated
1457  */
1458 static int
1459 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1460 		    struct ice_flow_prof *prof, u16 vsi_handle)
1461 {
1462 	int status = 0;
1463 
1464 	if (!test_bit(vsi_handle, prof->vsis)) {
1465 		status = ice_add_prof_id_flow(hw, blk,
1466 					      ice_get_hw_vsi_num(hw,
1467 								 vsi_handle),
1468 					      prof->id);
1469 		if (!status)
1470 			set_bit(vsi_handle, prof->vsis);
1471 		else
1472 			ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
1473 				  status);
1474 	}
1475 
1476 	return status;
1477 }
1478 
1479 /**
1480  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
1481  * @hw: pointer to the hardware structure
1482  * @blk: classification stage
1483  * @prof: pointer to flow profile
1484  * @vsi_handle: software VSI handle
1485  *
1486  * Assumption: the caller has acquired the lock to the profile list
1487  * and the software VSI handle has been validated
1488  */
1489 static int
1490 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
1491 		       struct ice_flow_prof *prof, u16 vsi_handle)
1492 {
1493 	int status = 0;
1494 
1495 	if (test_bit(vsi_handle, prof->vsis)) {
1496 		status = ice_rem_prof_id_flow(hw, blk,
1497 					      ice_get_hw_vsi_num(hw,
1498 								 vsi_handle),
1499 					      prof->id);
1500 		if (!status)
1501 			clear_bit(vsi_handle, prof->vsis);
1502 		else
1503 			ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
1504 				  status);
1505 	}
1506 
1507 	return status;
1508 }
1509 
1510 /**
1511  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
1512  * @hw: pointer to the HW struct
1513  * @blk: classification stage
1514  * @dir: flow direction
1515  * @prof_id: unique ID to identify this flow profile
1516  * @segs: array of one or more packet segments that describe the flow
1517  * @segs_cnt: number of packet segments provided
1518  * @prof: stores the returned flow profile added
1519  */
1520 int
1521 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1522 		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
1523 		  struct ice_flow_prof **prof)
1524 {
1525 	int status;
1526 
1527 	if (segs_cnt > ICE_FLOW_SEG_MAX)
1528 		return -ENOSPC;
1529 
1530 	if (!segs_cnt)
1531 		return -EINVAL;
1532 
1533 	if (!segs)
1534 		return -EINVAL;
1535 
1536 	status = ice_flow_val_hdrs(segs, segs_cnt);
1537 	if (status)
1538 		return status;
1539 
1540 	mutex_lock(&hw->fl_profs_locks[blk]);
1541 
1542 	status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
1543 					prof);
1544 	if (!status)
1545 		list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
1546 
1547 	mutex_unlock(&hw->fl_profs_locks[blk]);
1548 
1549 	return status;
1550 }
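
/* Illustrative sketch only (not part of the driver): mirroring what
 * ice_add_rss_cfg_sync() below does internally, a caller could describe a
 * single outer segment hashing on the IPv4 source/destination addresses and
 * register it as an RSS profile. The profile ID 0x1234 and the local
 * variable names are invented for this example; ice_flow_set_fld() fills in
 * the segment's header bits for these fields automatically.
 *
 *	struct ice_flow_seg_info seg = {};
 *	struct ice_flow_prof *prof;
 *	int err;
 *
 *	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 *	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
 *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 *	err = ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, 0x1234,
 *				&seg, 1, &prof);
 *	if (err)
 *		return err;
 *
 * The matching teardown would be ice_flow_rem_prof(hw, ICE_BLK_RSS, 0x1234).
 */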
1551 
1552 /**
1553  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
1554  * @hw: pointer to the HW struct
1555  * @blk: the block for which the flow profile is to be removed
1556  * @prof_id: unique ID of the flow profile to be removed
1557  */
1558 int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1559 {
1560 	struct ice_flow_prof *prof;
1561 	int status;
1562 
1563 	mutex_lock(&hw->fl_profs_locks[blk]);
1564 
1565 	prof = ice_flow_find_prof_id(hw, blk, prof_id);
1566 	if (!prof) {
1567 		status = -ENOENT;
1568 		goto out;
1569 	}
1570 
1571 	/* prof becomes invalid after the call */
1572 	status = ice_flow_rem_prof_sync(hw, blk, prof);
1573 
1574 out:
1575 	mutex_unlock(&hw->fl_profs_locks[blk]);
1576 
1577 	return status;
1578 }
1579 
1580 /**
1581  * ice_flow_add_entry - Add a flow entry
1582  * @hw: pointer to the HW struct
1583  * @blk: classification stage
1584  * @prof_id: ID of the profile to add a new flow entry to
1585  * @entry_id: unique ID to identify this flow entry
1586  * @vsi_handle: software VSI handle for the flow entry
1587  * @prio: priority of the flow entry
1588  * @data: pointer to a data buffer containing flow entry's match values/masks
1589  * @entry_h: pointer to buffer that receives the new flow entry's handle
1590  */
1591 int
1592 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1593 		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
1594 		   void *data, u64 *entry_h)
1595 {
1596 	struct ice_flow_entry *e = NULL;
1597 	struct ice_flow_prof *prof;
1598 	int status;
1599 
1600 	/* No flow entry data is expected for RSS */
1601 	if (!entry_h || (!data && blk != ICE_BLK_RSS))
1602 		return -EINVAL;
1603 
1604 	if (!ice_is_vsi_valid(hw, vsi_handle))
1605 		return -EINVAL;
1606 
1607 	mutex_lock(&hw->fl_profs_locks[blk]);
1608 
1609 	prof = ice_flow_find_prof_id(hw, blk, prof_id);
1610 	if (!prof) {
1611 		status = -ENOENT;
1612 	} else {
1613 		/* Allocate memory for the entry being added and associate
1614 		 * the VSI to the found flow profile
1615 		 */
1616 		e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
1617 		if (!e)
1618 			status = -ENOMEM;
1619 		else
1620 			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
1621 	}
1622 
1623 	mutex_unlock(&hw->fl_profs_locks[blk]);
1624 	if (status)
1625 		goto out;
1626 
1627 	e->id = entry_id;
1628 	e->vsi_handle = vsi_handle;
1629 	e->prof = prof;
1630 	e->priority = prio;
1631 
1632 	switch (blk) {
1633 	case ICE_BLK_FD:
1634 	case ICE_BLK_RSS:
1635 		break;
1636 	default:
1637 		status = -EOPNOTSUPP;
1638 		goto out;
1639 	}
1640 
1641 	mutex_lock(&prof->entries_lock);
1642 	list_add(&e->l_entry, &prof->entries);
1643 	mutex_unlock(&prof->entries_lock);
1644 
1645 	*entry_h = ICE_FLOW_ENTRY_HNDL(e);
1646 
1647 out:
1648 	if (status && e) {
1649 		devm_kfree(ice_hw_to_dev(hw), e->entry);
1650 		devm_kfree(ice_hw_to_dev(hw), e);
1651 	}
1652 
1653 	return status;
1654 }
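
/* Illustrative sketch only: adding and later removing a flow entry against
 * an already-added RSS profile on a validated VSI. No match data buffer is
 * passed because, per the check above, none is expected for ICE_BLK_RSS.
 * The profile ID (0x1234), entry ID (1) and ICE_FLOW_PRIO_NORMAL (assumed
 * to be one of the enum ice_flow_priority values from ice_flow.h) are used
 * only for this example.
 *
 *	u64 entry_h;
 *	int err;
 *
 *	err = ice_flow_add_entry(hw, ICE_BLK_RSS, 0x1234, 1, vsi_handle,
 *				 ICE_FLOW_PRIO_NORMAL, NULL, &entry_h);
 *	if (!err)
 *		err = ice_flow_rem_entry(hw, ICE_BLK_RSS, entry_h);
 */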
1655 
1656 /**
1657  * ice_flow_rem_entry - Remove a flow entry
1658  * @hw: pointer to the HW struct
1659  * @blk: classification stage
1660  * @entry_h: handle to the flow entry to be removed
1661  */
1662 int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h)
1663 {
1664 	struct ice_flow_entry *entry;
1665 	struct ice_flow_prof *prof;
1666 	int status = 0;
1667 
1668 	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
1669 		return -EINVAL;
1670 
1671 	entry = ICE_FLOW_ENTRY_PTR(entry_h);
1672 
1673 	/* Retain the pointer to the flow profile as the entry will be freed */
1674 	prof = entry->prof;
1675 
1676 	if (prof) {
1677 		mutex_lock(&prof->entries_lock);
1678 		status = ice_flow_rem_entry_sync(hw, blk, entry);
1679 		mutex_unlock(&prof->entries_lock);
1680 	}
1681 
1682 	return status;
1683 }
1684 
1685 /**
1686  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
1687  * @seg: packet segment the field being set belongs to
1688  * @fld: field to be set
1689  * @field_type: type of the field
1690  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
1691  *           entry's input buffer
1692  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
1693  *            input buffer
1694  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
1695  *            entry's input buffer
1696  *
1697  * This helper function stores information of a field being matched, including
1698  * the type of the field and the locations of the value to match, the mask, and
1699  * the upper-bound value in the start of the input buffer for a flow entry.
1700  * This function should only be used for fixed-size data structures.
1701  *
1702  * This function also opportunistically determines the protocol headers to be
1703  * present based on the fields being set. Some fields cannot be used alone to
1704  * determine the protocol headers present. Sometimes, fields for particular
1705  * protocol headers are not matched. In those cases, the protocol headers
1706  * must be explicitly set.
1707  */
1708 static void
1709 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1710 		     enum ice_flow_fld_match_type field_type, u16 val_loc,
1711 		     u16 mask_loc, u16 last_loc)
1712 {
1713 	u64 bit = BIT_ULL(fld);
1714 
1715 	seg->match |= bit;
1716 	if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
1717 		seg->range |= bit;
1718 
1719 	seg->fields[fld].type = field_type;
1720 	seg->fields[fld].src.val = val_loc;
1721 	seg->fields[fld].src.mask = mask_loc;
1722 	seg->fields[fld].src.last = last_loc;
1723 
1724 	ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
1725 }
1726 
1727 /**
1728  * ice_flow_set_fld - specifies locations of field from entry's input buffer
1729  * @seg: packet segment the field being set belongs to
1730  * @fld: field to be set
1731  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
1732  *           entry's input buffer
1733  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
1734  *            input buffer
1735  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
1736  *            entry's input buffer
1737  * @range: indicate if field being matched is to be in a range
1738  *
1739  * This function specifies the locations, in the form of byte offsets from the
1740  * start of the input buffer for a flow entry, from where the value to match,
1741  * the mask value, and upper value can be extracted. These locations are then
1742  * stored in the flow profile. When adding a flow entry associated with the
1743  * flow profile, these locations will be used to quickly extract the values and
1744  * create the content of a match entry. This function should only be used for
1745  * fixed-size data structures.
1746  */
1747 void
1748 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1749 		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
1750 {
1751 	enum ice_flow_fld_match_type t = range ?
1752 		ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
1753 
1754 	ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
1755 }
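
/* Illustrative sketch only: the value/mask/last locations are plain byte
 * offsets into whatever input buffer the caller later passes to
 * ice_flow_add_entry(). The struct below and the segment variable "seg" are
 * invented for this example; only the offsets derived from the struct are
 * handed to the flow code.
 *
 *	struct example_tcp_input {
 *		__be16 dst_port;
 *		__be16 dst_port_mask;
 *	};
 *
 *	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
 *			 offsetof(struct example_tcp_input, dst_port),
 *			 offsetof(struct example_tcp_input, dst_port_mask),
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 */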
1756 
1757 /**
1758  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
1759  * @seg: packet segment the field being set belongs to
1760  * @off: offset of the raw field from the beginning of the segment in bytes
1761  * @len: length of the raw pattern to be matched
1762  * @val_loc: location of the value to match from entry's input buffer
1763  * @mask_loc: location of mask value from entry's input buffer
1764  *
1765  * This function specifies the offset of the raw field to be matched from the
1766  * beginning of the specified packet segment, and the locations, in the form of
1767  * byte offsets from the start of the input buffer for a flow entry, from where
1768  * the value to match and the mask value are to be extracted. These locations are
1769  * then stored in the flow profile. When adding flow entries to the associated
1770  * flow profile, these locations can be used to quickly extract the values to
1771  * create the content of a match entry. This function should only be used for
1772  * fixed-size data structures.
1773  */
1774 void
1775 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
1776 		     u16 val_loc, u16 mask_loc)
1777 {
1778 	if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
1779 		seg->raws[seg->raws_cnt].off = off;
1780 		seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
1781 		seg->raws[seg->raws_cnt].info.src.val = val_loc;
1782 		seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
1783 		/* The "last" field is used to store the length of the field */
1784 		seg->raws[seg->raws_cnt].info.src.last = len;
1785 	}
1786 
1787 	/* Overflows of "raws" will be handled as an error condition later in
1788 	 * the flow when this information is processed.
1789 	 */
1790 	seg->raws_cnt++;
1791 }
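
/* Illustrative sketch only: match a 2-byte raw pattern located 4 bytes into
 * the packet segment, taking the pattern value and its mask from
 * (hypothetical) byte offsets 0 and 2 of the entry's input buffer. "seg" is
 * a segment being built by the caller, as in the earlier examples.
 *
 *	ice_flow_add_fld_raw(&seg, 4, 2, 0, 2);
 */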
1792 
1793 /**
1794  * ice_flow_rem_vsi_prof - remove VSI from flow profile
1795  * @hw: pointer to the hardware structure
1796  * @vsi_handle: software VSI handle
1797  * @prof_id: unique ID to identify this flow profile
1798  *
1799  * This function removes the flow entries associated with the input
1800  * VSI handle and disassociates the VSI from the flow profile.
1801  */
1802 int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id)
1803 {
1804 	struct ice_flow_prof *prof;
1805 	int status = 0;
1806 
1807 	if (!ice_is_vsi_valid(hw, vsi_handle))
1808 		return -EINVAL;
1809 
1810 	/* find flow profile pointer with input package block and profile ID */
1811 	prof = ice_flow_find_prof_id(hw, ICE_BLK_FD, prof_id);
1812 	if (!prof) {
1813 		ice_debug(hw, ICE_DBG_PKG, "Cannot find flow profile id=%llu\n",
1814 			  prof_id);
1815 		return -ENOENT;
1816 	}
1817 
1818 	/* Remove all remaining flow entries before removing the flow profile */
1819 	if (!list_empty(&prof->entries)) {
1820 		struct ice_flow_entry *e, *t;
1821 
1822 		mutex_lock(&prof->entries_lock);
1823 		list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
1824 			if (e->vsi_handle != vsi_handle)
1825 				continue;
1826 
1827 			status = ice_flow_rem_entry_sync(hw, ICE_BLK_FD, e);
1828 			if (status)
1829 				break;
1830 		}
1831 		mutex_unlock(&prof->entries_lock);
1832 	}
1833 	if (status)
1834 		return status;
1835 
1836 	/* disassociate the flow profile from sw VSI handle */
1837 	status = ice_flow_disassoc_prof(hw, ICE_BLK_FD, prof, vsi_handle);
1838 	if (status)
1839 		ice_debug(hw, ICE_DBG_PKG, "ice_flow_disassoc_prof() failed with status=%d\n",
1840 			  status);
1841 	return status;
1842 }
1843 
1844 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
1845 	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
1846 
1847 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
1848 	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
1849 
1850 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
1851 	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
1852 
1853 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
1854 	(ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
1855 	 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
1856 	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
1857 
1858 /**
1859  * ice_flow_set_rss_seg_info - setup packet segments for RSS
1860  * @segs: pointer to the flow field segment(s)
1861  * @hash_fields: fields to be hashed on for the segment(s)
1862  * @flow_hdr: protocol header fields within a packet segment
1863  *
1864  * Helper function to extract fields from the hash bitmap and use the flow
1865  * header value to set up the flow field segment for further use in flow
1866  * profile entry addition or removal.
1867  */
1868 static int
1869 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
1870 			  u32 flow_hdr)
1871 {
1872 	u64 val;
1873 	u8 i;
1874 
1875 	for_each_set_bit(i, (unsigned long *)&hash_fields,
1876 			 ICE_FLOW_FIELD_IDX_MAX)
1877 		ice_flow_set_fld(segs, (enum ice_flow_field)i,
1878 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1879 				 ICE_FLOW_FLD_OFF_INVAL, false);
1880 
1881 	ICE_FLOW_SET_HDRS(segs, flow_hdr);
1882 
1883 	if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
1884 	    ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
1885 		return -EINVAL;
1886 
1887 	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
1888 	if (val && !is_power_of_2(val))
1889 		return -EIO;
1890 
1891 	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
1892 	if (val && !is_power_of_2(val))
1893 		return -EIO;
1894 
1895 	return 0;
1896 }
1897 
1898 /**
1899  * ice_rem_vsi_rss_list - remove VSI from RSS list
1900  * @hw: pointer to the hardware structure
1901  * @vsi_handle: software VSI handle
1902  *
1903  * Remove the VSI from all RSS configurations in the list.
1904  */
1905 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
1906 {
1907 	struct ice_rss_cfg *r, *tmp;
1908 
1909 	if (list_empty(&hw->rss_list_head))
1910 		return;
1911 
1912 	mutex_lock(&hw->rss_locks);
1913 	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1914 		if (test_and_clear_bit(vsi_handle, r->vsis))
1915 			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1916 				list_del(&r->l_entry);
1917 				devm_kfree(ice_hw_to_dev(hw), r);
1918 			}
1919 	mutex_unlock(&hw->rss_locks);
1920 }
1921 
1922 /**
1923  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
1924  * @hw: pointer to the hardware structure
1925  * @vsi_handle: software VSI handle
1926  *
1927  * This function will iterate through all flow profiles and disassociate
1928  * the VSI from any profile it is associated with. If a flow profile is
1929  * then left with no VSIs, it will be removed.
1930  */
1931 int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
1932 {
1933 	const enum ice_block blk = ICE_BLK_RSS;
1934 	struct ice_flow_prof *p, *t;
1935 	int status = 0;
1936 
1937 	if (!ice_is_vsi_valid(hw, vsi_handle))
1938 		return -EINVAL;
1939 
1940 	if (list_empty(&hw->fl_profs[blk]))
1941 		return 0;
1942 
1943 	mutex_lock(&hw->rss_locks);
1944 	list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
1945 		if (test_bit(vsi_handle, p->vsis)) {
1946 			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
1947 			if (status)
1948 				break;
1949 
1950 			if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
1951 				status = ice_flow_rem_prof(hw, blk, p->id);
1952 				if (status)
1953 					break;
1954 			}
1955 		}
1956 	mutex_unlock(&hw->rss_locks);
1957 
1958 	return status;
1959 }
1960 
1961 /**
1962  * ice_rem_rss_list - remove RSS configuration from list
1963  * @hw: pointer to the hardware structure
1964  * @vsi_handle: software VSI handle
1965  * @prof: pointer to flow profile
1966  *
1967  * Assumption: lock has already been acquired for RSS list
1968  */
1969 static void
1970 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1971 {
1972 	struct ice_rss_cfg *r, *tmp;
1973 
1974 	/* Search for RSS hash fields associated with the VSI that match the
1975 	 * hash configurations associated with the flow profile. If found,
1976 	 * remove it from the RSS entry list of the VSI context and delete it.
1977 	 */
1978 	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1979 		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1980 		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1981 			clear_bit(vsi_handle, r->vsis);
1982 			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1983 				list_del(&r->l_entry);
1984 				devm_kfree(ice_hw_to_dev(hw), r);
1985 			}
1986 			return;
1987 		}
1988 }
1989 
1990 /**
1991  * ice_add_rss_list - add RSS configuration to list
1992  * @hw: pointer to the hardware structure
1993  * @vsi_handle: software VSI handle
1994  * @prof: pointer to flow profile
1995  *
1996  * Assumption: lock has already been acquired for RSS list
1997  */
1998 static int
1999 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
2000 {
2001 	struct ice_rss_cfg *r, *rss_cfg;
2002 
2003 	list_for_each_entry(r, &hw->rss_list_head, l_entry)
2004 		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
2005 		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
2006 			set_bit(vsi_handle, r->vsis);
2007 			return 0;
2008 		}
2009 
2010 	rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
2011 			       GFP_KERNEL);
2012 	if (!rss_cfg)
2013 		return -ENOMEM;
2014 
2015 	rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
2016 	rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
2017 	set_bit(vsi_handle, rss_cfg->vsis);
2018 
2019 	list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
2020 
2021 	return 0;
2022 }
2023 
2024 #define ICE_FLOW_PROF_HASH_S	0
2025 #define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
2026 #define ICE_FLOW_PROF_HDR_S	32
2027 #define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
2028 #define ICE_FLOW_PROF_ENCAP_S	63
2029 #define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
2030 
2031 #define ICE_RSS_OUTER_HEADERS	1
2032 #define ICE_RSS_INNER_HEADERS	2
2033 
2034 /* Flow profile ID format:
2035  * [0:31] - Packet match fields
2036  * [32:62] - Protocol header
2037  * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
2038  */
2039 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
2040 	((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
2041 	       (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
2042 	       ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0)))
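
/* For instance, per the layout above, a single-segment IPv4 address hash
 * yields an ID whose low 32 bits come from ICE_FLOW_HASH_IPV4, whose
 * protocol header bits carry ICE_FLOW_SEG_HDR_IPV4, and whose encapsulation
 * bit stays clear because segs_cnt is 1:
 *
 *	u64 prof_id = ICE_FLOW_GEN_PROFID(ICE_FLOW_HASH_IPV4,
 *					  ICE_FLOW_SEG_HDR_IPV4, 1);
 */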
2043 
2044 /**
2045  * ice_add_rss_cfg_sync - add an RSS configuration
2046  * @hw: pointer to the hardware structure
2047  * @vsi_handle: software VSI handle
2048  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
2049  * @addl_hdrs: protocol header fields
2050  * @segs_cnt: packet segment count
2051  *
2052  * Assumption: lock has already been acquired for RSS list
2053  */
2054 static int
2055 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
2056 		     u32 addl_hdrs, u8 segs_cnt)
2057 {
2058 	const enum ice_block blk = ICE_BLK_RSS;
2059 	struct ice_flow_prof *prof = NULL;
2060 	struct ice_flow_seg_info *segs;
2061 	int status;
2062 
2063 	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
2064 		return -EINVAL;
2065 
2066 	segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
2067 	if (!segs)
2068 		return -ENOMEM;
2069 
2070 	/* Construct the packet segment info from the hashed fields */
2071 	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
2072 					   addl_hdrs);
2073 	if (status)
2074 		goto exit;
2075 
2076 	/* Search for a flow profile that has matching headers, hash fields
2077 	 * and has the input VSI associated to it. If found, no further
2078 	 * operations required and exit.
2079 	 */
2080 	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
2081 					vsi_handle,
2082 					ICE_FLOW_FIND_PROF_CHK_FLDS |
2083 					ICE_FLOW_FIND_PROF_CHK_VSI);
2084 	if (prof)
2085 		goto exit;
2086 
2087 	/* Check if a flow profile exists with the same protocol headers and
2088 	 * associated with the input VSI. If so, disassociate the VSI from
2089 	 * this profile. The VSI will be added to a new profile created with
2090 	 * the protocol header and new hash field configuration.
2091 	 */
2092 	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
2093 					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
2094 	if (prof) {
2095 		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
2096 		if (!status)
2097 			ice_rem_rss_list(hw, vsi_handle, prof);
2098 		else
2099 			goto exit;
2100 
2101 		/* Remove profile if it has no VSIs associated */
2102 		if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
2103 			status = ice_flow_rem_prof(hw, blk, prof->id);
2104 			if (status)
2105 				goto exit;
2106 		}
2107 	}
2108 
2109 	/* Search for a profile that has the same match fields only. If one
2110 	 * exists, associate the VSI with this profile.
2111 	 */
2112 	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
2113 					vsi_handle,
2114 					ICE_FLOW_FIND_PROF_CHK_FLDS);
2115 	if (prof) {
2116 		status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2117 		if (!status)
2118 			status = ice_add_rss_list(hw, vsi_handle, prof);
2119 		goto exit;
2120 	}
2121 
2122 	/* Create a new flow profile with generated profile and packet
2123 	 * segment information.
2124 	 */
2125 	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
2126 				   ICE_FLOW_GEN_PROFID(hashed_flds,
2127 						       segs[segs_cnt - 1].hdrs,
2128 						       segs_cnt),
2129 				   segs, segs_cnt, &prof);
2130 	if (status)
2131 		goto exit;
2132 
2133 	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2134 	/* If association to a new flow profile failed then this profile can
2135 	 * be removed.
2136 	 */
2137 	if (status) {
2138 		ice_flow_rem_prof(hw, blk, prof->id);
2139 		goto exit;
2140 	}
2141 
2142 	status = ice_add_rss_list(hw, vsi_handle, prof);
2143 
2144 exit:
2145 	kfree(segs);
2146 	return status;
2147 }
2148 
2149 /**
2150  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
2151  * @hw: pointer to the hardware structure
2152  * @vsi_handle: software VSI handle
2153  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
2154  * @addl_hdrs: protocol header fields
2155  *
2156  * This function will generate a flow profile based on the input fields to
2157  * hash on and the flow type, and will use the VSI number to add a flow
2158  * entry to the profile.
2159  */
2160 int
2161 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
2162 		u32 addl_hdrs)
2163 {
2164 	int status;
2165 
2166 	if (hashed_flds == ICE_HASH_INVALID ||
2167 	    !ice_is_vsi_valid(hw, vsi_handle))
2168 		return -EINVAL;
2169 
2170 	mutex_lock(&hw->rss_locks);
2171 	status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
2172 				      ICE_RSS_OUTER_HEADERS);
2173 	if (!status)
2174 		status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
2175 					      addl_hdrs, ICE_RSS_INNER_HEADERS);
2176 	mutex_unlock(&hw->rss_locks);
2177 
2178 	return status;
2179 }
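
/* Illustrative sketch only: a caller setting up RSS for a validated VSI
 * could enable 4-tuple hashing of IPv4/TCP traffic like so:
 *
 *	err = ice_add_rss_cfg(hw, vsi_handle,
 *			      ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT,
 *			      ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
 *
 * The same arguments passed to ice_rem_rss_cfg() below undo this.
 */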
2180 
2181 /**
2182  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
2183  * @hw: pointer to the hardware structure
2184  * @vsi_handle: software VSI handle
2185  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
2186  * @addl_hdrs: Protocol header fields within a packet segment
2187  * @segs_cnt: packet segment count
2188  *
2189  * Assumption: lock has already been acquired for RSS list
2190  */
2191 static int
2192 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
2193 		     u32 addl_hdrs, u8 segs_cnt)
2194 {
2195 	const enum ice_block blk = ICE_BLK_RSS;
2196 	struct ice_flow_seg_info *segs;
2197 	struct ice_flow_prof *prof;
2198 	int status;
2199 
2200 	segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
2201 	if (!segs)
2202 		return -ENOMEM;
2203 
2204 	/* Construct the packet segment info from the hashed fields */
2205 	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
2206 					   addl_hdrs);
2207 	if (status)
2208 		goto out;
2209 
2210 	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
2211 					vsi_handle,
2212 					ICE_FLOW_FIND_PROF_CHK_FLDS);
2213 	if (!prof) {
2214 		status = -ENOENT;
2215 		goto out;
2216 	}
2217 
2218 	status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
2219 	if (status)
2220 		goto out;
2221 
2222 	/* Remove RSS configuration from VSI context before deleting
2223 	 * the flow profile.
2224 	 */
2225 	ice_rem_rss_list(hw, vsi_handle, prof);
2226 
2227 	if (bitmap_empty(prof->vsis, ICE_MAX_VSI))
2228 		status = ice_flow_rem_prof(hw, blk, prof->id);
2229 
2230 out:
2231 	kfree(segs);
2232 	return status;
2233 }
2234 
2235 /**
2236  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
2237  * @hw: pointer to the hardware structure
2238  * @vsi_handle: software VSI handle
2239  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
2240  * @addl_hdrs: Protocol header fields within a packet segment
2241  *
2242  * This function will look up the flow profile based on the input
2243  * hash field bitmap, iterate through the profile entry list of
2244  * that profile and find the entry associated with the input VSI to be
2245  * removed. Calls are made to the underlying flow APIs which will in
2246  * turn build or update buffers for the RSS XLT1 section.
2247  */
2248 int __maybe_unused
2249 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
2250 		u32 addl_hdrs)
2251 {
2252 	int status;
2253 
2254 	if (hashed_flds == ICE_HASH_INVALID ||
2255 	    !ice_is_vsi_valid(hw, vsi_handle))
2256 		return -EINVAL;
2257 
2258 	mutex_lock(&hw->rss_locks);
2259 	status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
2260 				      ICE_RSS_OUTER_HEADERS);
2261 	if (!status)
2262 		status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
2263 					      addl_hdrs, ICE_RSS_INNER_HEADERS);
2264 	mutex_unlock(&hw->rss_locks);
2265 
2266 	return status;
2267 }
2268 
2269 /* Mapping of AVF hash bit fields to an L3-L4 hash combination.
2270  * As the ice_flow_avf_hdr_field values represent individual bit shifts in a
2271  * hash, convert them to their appropriate flow L3/L4 values.
2272  */
2273 #define ICE_FLOW_AVF_RSS_IPV4_MASKS \
2274 	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
2275 	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
2276 #define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
2277 	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
2278 	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
2279 #define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
2280 	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
2281 	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
2282 	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
2283 #define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
2284 	(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
2285 	 ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
2286 
2287 #define ICE_FLOW_AVF_RSS_IPV6_MASKS \
2288 	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
2289 	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
2290 #define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
2291 	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
2292 	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
2293 	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
2294 #define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
2295 	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
2296 	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
2297 #define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
2298 	(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
2299 	 ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
2300 
2301 /**
2302  * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
2303  * @hw: pointer to the hardware structure
2304  * @vsi_handle: software VSI handle
2305  * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
2306  *
2307  * This function will take the hash bitmap provided by the AVF driver via a
2308  * message, convert it to ICE-compatible values, and configure RSS flow
2309  * profiles.
2310  */
2311 int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
2312 {
2313 	int status = 0;
2314 	u64 hash_flds;
2315 
2316 	if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
2317 	    !ice_is_vsi_valid(hw, vsi_handle))
2318 		return -EINVAL;
2319 
2320 	/* Make sure no unsupported bits are specified */
2321 	if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
2322 			 ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
2323 		return -EIO;
2324 
2325 	hash_flds = avf_hash;
2326 
2327 	/* Always create an L3 RSS configuration for any L4 RSS configuration */
2328 	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
2329 		hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;
2330 
2331 	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
2332 		hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;
2333 
2334 	/* Create the corresponding RSS configuration for each valid hash bit */
2335 	while (hash_flds) {
2336 		u64 rss_hash = ICE_HASH_INVALID;
2337 
2338 		if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
2339 			if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
2340 				rss_hash = ICE_FLOW_HASH_IPV4;
2341 				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
2342 			} else if (hash_flds &
2343 				   ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
2344 				rss_hash = ICE_FLOW_HASH_IPV4 |
2345 					ICE_FLOW_HASH_TCP_PORT;
2346 				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
2347 			} else if (hash_flds &
2348 				   ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
2349 				rss_hash = ICE_FLOW_HASH_IPV4 |
2350 					ICE_FLOW_HASH_UDP_PORT;
2351 				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
2352 			} else if (hash_flds &
2353 				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
2354 				rss_hash = ICE_FLOW_HASH_IPV4 |
2355 					ICE_FLOW_HASH_SCTP_PORT;
2356 				hash_flds &=
2357 					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
2358 			}
2359 		} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
2360 			if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
2361 				rss_hash = ICE_FLOW_HASH_IPV6;
2362 				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
2363 			} else if (hash_flds &
2364 				   ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
2365 				rss_hash = ICE_FLOW_HASH_IPV6 |
2366 					ICE_FLOW_HASH_TCP_PORT;
2367 				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
2368 			} else if (hash_flds &
2369 				   ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
2370 				rss_hash = ICE_FLOW_HASH_IPV6 |
2371 					ICE_FLOW_HASH_UDP_PORT;
2372 				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
2373 			} else if (hash_flds &
2374 				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
2375 				rss_hash = ICE_FLOW_HASH_IPV6 |
2376 					ICE_FLOW_HASH_SCTP_PORT;
2377 				hash_flds &=
2378 					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
2379 			}
2380 		}
2381 
2382 		if (rss_hash == ICE_HASH_INVALID)
2383 			return -EIO;
2384 
2385 		status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
2386 					 ICE_FLOW_SEG_HDR_NONE);
2387 		if (status)
2388 			break;
2389 	}
2390 
2391 	return status;
2392 }
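
/* Illustrative sketch only: when a VF asks for TCP-over-IPv4 hashing, the
 * caller can pass the corresponding AVF bit straight through; the matching
 * L3 configuration is added implicitly, as described above. vf_vsi_handle
 * is a placeholder for the VF's validated VSI handle.
 *
 *	err = ice_add_avf_rss_cfg(hw, vf_vsi_handle,
 *				  BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP));
 */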
2393 
2394 /**
2395  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
2396  * @hw: pointer to the hardware structure
2397  * @vsi_handle: software VSI handle
2398  */
2399 int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
2400 {
2401 	struct ice_rss_cfg *r;
2402 	int status = 0;
2403 
2404 	if (!ice_is_vsi_valid(hw, vsi_handle))
2405 		return -EINVAL;
2406 
2407 	mutex_lock(&hw->rss_locks);
2408 	list_for_each_entry(r, &hw->rss_list_head, l_entry) {
2409 		if (test_bit(vsi_handle, r->vsis)) {
2410 			status = ice_add_rss_cfg_sync(hw, vsi_handle,
2411 						      r->hashed_flds,
2412 						      r->packet_hdr,
2413 						      ICE_RSS_OUTER_HEADERS);
2414 			if (status)
2415 				break;
2416 			status = ice_add_rss_cfg_sync(hw, vsi_handle,
2417 						      r->hashed_flds,
2418 						      r->packet_hdr,
2419 						      ICE_RSS_INNER_HEADERS);
2420 			if (status)
2421 				break;
2422 		}
2423 	}
2424 	mutex_unlock(&hw->rss_locks);
2425 
2426 	return status;
2427 }
2428 
2429 /**
2430  * ice_get_rss_cfg - returns hashed fields for the given header types
2431  * @hw: pointer to the hardware structure
2432  * @vsi_handle: software VSI handle
2433  * @hdrs: protocol header type
2434  *
2435  * This function will return the match fields of the first instance of a flow
2436  * profile having the given header types and containing the input VSI.
2437  */
2438 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
2439 {
2440 	u64 rss_hash = ICE_HASH_INVALID;
2441 	struct ice_rss_cfg *r;
2442 
2443 	/* verify that the protocol header is non-zero and the VSI is valid */
2444 	if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
2445 		return ICE_HASH_INVALID;
2446 
2447 	mutex_lock(&hw->rss_locks);
2448 	list_for_each_entry(r, &hw->rss_list_head, l_entry)
2449 		if (test_bit(vsi_handle, r->vsis) &&
2450 		    r->packet_hdr == hdrs) {
2451 			rss_hash = r->hashed_flds;
2452 			break;
2453 		}
2454 	mutex_unlock(&hw->rss_locks);
2455 
2456 	return rss_hash;
2457 }
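
/* Illustrative sketch only: query which fields are currently hashed for
 * IPv4/TCP packets on a VSI; ICE_HASH_INVALID means no matching RSS
 * configuration was found.
 *
 *	u64 hash = ice_get_rss_cfg(hw, vsi_handle,
 *				   ICE_FLOW_SEG_HDR_IPV4 |
 *				   ICE_FLOW_SEG_HDR_TCP);
 */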
2458