1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * RSS and Classifier helpers for Marvell PPv2 Network Controller
4  *
5  * Copyright (C) 2014 Marvell
6  *
7  * Marcin Wojtas <mw@semihalf.com>
8  */
9 
10 #include "mvpp2.h"
11 #include "mvpp2_cls.h"
12 #include "mvpp2_prs.h"
13 
14 #define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask)	\
15 {								\
16 	.flow_type = _type,					\
17 	.flow_id = _id,						\
18 	.supported_hash_opts = _opts,				\
19 	.prs_ri = {						\
20 		.ri = _ri,					\
21 		.ri_mask = _ri_mask				\
22 	}							\
23 }
24 
25 static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
26 	/* TCP over IPv4 flows, Not fragmented, no vlan tag */
27 	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
28 		       MVPP22_CLS_HEK_IP4_5T,
29 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
30 		       MVPP2_PRS_RI_L4_TCP,
31 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
32 
33 	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
34 		       MVPP22_CLS_HEK_IP4_5T,
35 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
36 		       MVPP2_PRS_RI_L4_TCP,
37 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
38 
39 	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
40 		       MVPP22_CLS_HEK_IP4_5T,
41 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
42 		       MVPP2_PRS_RI_L4_TCP,
43 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
44 
45 	/* TCP over IPv4 flows, Not fragmented, with vlan tag */
46 	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
47 		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
48 		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
49 		       MVPP2_PRS_IP_MASK),
50 
51 	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
52 		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
53 		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
54 		       MVPP2_PRS_IP_MASK),
55 
56 	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
57 		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
58 		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
59 		       MVPP2_PRS_IP_MASK),
60 
61 	/* TCP over IPv4 flows, fragmented, no vlan tag */
62 	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
63 		       MVPP22_CLS_HEK_IP4_2T,
64 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
65 		       MVPP2_PRS_RI_L4_TCP,
66 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
67 
68 	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
69 		       MVPP22_CLS_HEK_IP4_2T,
70 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
71 		       MVPP2_PRS_RI_L4_TCP,
72 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
73 
74 	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
75 		       MVPP22_CLS_HEK_IP4_2T,
76 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
77 		       MVPP2_PRS_RI_L4_TCP,
78 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
79 
80 	/* TCP over IPv4 flows, fragmented, with vlan tag */
81 	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
82 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
83 		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
84 		       MVPP2_PRS_IP_MASK),
85 
86 	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
87 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
88 		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
89 		       MVPP2_PRS_IP_MASK),
90 
91 	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
92 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
93 		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
94 		       MVPP2_PRS_IP_MASK),
95 
96 	/* UDP over IPv4 flows, Not fragmented, no vlan tag */
97 	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
98 		       MVPP22_CLS_HEK_IP4_5T,
99 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
100 		       MVPP2_PRS_RI_L4_UDP,
101 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
102 
103 	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
104 		       MVPP22_CLS_HEK_IP4_5T,
105 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
106 		       MVPP2_PRS_RI_L4_UDP,
107 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
108 
109 	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
110 		       MVPP22_CLS_HEK_IP4_5T,
111 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
112 		       MVPP2_PRS_RI_L4_UDP,
113 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
114 
115 	/* UDP over IPv4 flows, Not fragmented, with vlan tag */
116 	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
117 		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
118 		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
119 		       MVPP2_PRS_IP_MASK),
120 
121 	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
122 		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
123 		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
124 		       MVPP2_PRS_IP_MASK),
125 
126 	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
127 		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
128 		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
129 		       MVPP2_PRS_IP_MASK),
130 
131 	/* UDP over IPv4 flows, fragmented, no vlan tag */
132 	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
133 		       MVPP22_CLS_HEK_IP4_2T,
134 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
135 		       MVPP2_PRS_RI_L4_UDP,
136 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
137 
138 	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
139 		       MVPP22_CLS_HEK_IP4_2T,
140 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
141 		       MVPP2_PRS_RI_L4_UDP,
142 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
143 
144 	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
145 		       MVPP22_CLS_HEK_IP4_2T,
146 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
147 		       MVPP2_PRS_RI_L4_UDP,
148 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
149 
150 	/* UDP over IPv4 flows, fragmented, with vlan tag */
151 	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
152 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
153 		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
154 		       MVPP2_PRS_IP_MASK),
155 
156 	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
157 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
158 		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
159 		       MVPP2_PRS_IP_MASK),
160 
161 	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
162 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
163 		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
164 		       MVPP2_PRS_IP_MASK),
165 
166 	/* TCP over IPv6 flows, not fragmented, no vlan tag */
167 	MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
168 		       MVPP22_CLS_HEK_IP6_5T,
169 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
170 		       MVPP2_PRS_RI_L4_TCP,
171 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
172 
173 	MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
174 		       MVPP22_CLS_HEK_IP6_5T,
175 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
176 		       MVPP2_PRS_RI_L4_TCP,
177 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
178 
179 	/* TCP over IPv6 flows, not fragmented, with vlan tag */
180 	MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
181 		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
182 		       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
183 		       MVPP2_PRS_IP_MASK),
184 
185 	MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
186 		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
187 		       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
188 		       MVPP2_PRS_IP_MASK),
189 
190 	/* TCP over IPv6 flows, fragmented, no vlan tag */
191 	MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
192 		       MVPP22_CLS_HEK_IP6_2T,
193 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
194 		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
195 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
196 
197 	MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
198 		       MVPP22_CLS_HEK_IP6_2T,
199 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
200 		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
201 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
202 
203 	/* TCP over IPv6 flows, fragmented, with vlan tag */
204 	MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
205 		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
206 		       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
207 		       MVPP2_PRS_RI_L4_TCP,
208 		       MVPP2_PRS_IP_MASK),
209 
210 	MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
211 		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
212 		       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
213 		       MVPP2_PRS_RI_L4_TCP,
214 		       MVPP2_PRS_IP_MASK),
215 
216 	/* UDP over IPv6 flows, not fragmented, no vlan tag */
217 	MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
218 		       MVPP22_CLS_HEK_IP6_5T,
219 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
220 		       MVPP2_PRS_RI_L4_UDP,
221 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
222 
223 	MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
224 		       MVPP22_CLS_HEK_IP6_5T,
225 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
226 		       MVPP2_PRS_RI_L4_UDP,
227 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
228 
229 	/* UDP over IPv6 flows, not fragmented, with vlan tag */
230 	MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
231 		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
232 		       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
233 		       MVPP2_PRS_IP_MASK),
234 
235 	MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
236 		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
237 		       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
238 		       MVPP2_PRS_IP_MASK),
239 
240 	/* UDP over IPv6 flows, fragmented, no vlan tag */
241 	MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
242 		       MVPP22_CLS_HEK_IP6_2T,
243 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
244 		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
245 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
246 
247 	MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
248 		       MVPP22_CLS_HEK_IP6_2T,
249 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
250 		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
251 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
252 
253 	/* UDP over IPv6 flows, fragmented, with vlan tag */
254 	MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
255 		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
256 		       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
257 		       MVPP2_PRS_RI_L4_UDP,
258 		       MVPP2_PRS_IP_MASK),
259 
260 	MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
261 		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
262 		       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
263 		       MVPP2_PRS_RI_L4_UDP,
264 		       MVPP2_PRS_IP_MASK),
265 
266 	/* IPv4 flows, no vlan tag */
267 	MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
268 		       MVPP22_CLS_HEK_IP4_2T,
269 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
270 		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
271 	MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
272 		       MVPP22_CLS_HEK_IP4_2T,
273 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
274 		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
275 	MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
276 		       MVPP22_CLS_HEK_IP4_2T,
277 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
278 		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
279 
280 	/* IPv4 flows, with vlan tag */
281 	MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
282 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
283 		       MVPP2_PRS_RI_L3_IP4,
284 		       MVPP2_PRS_RI_L3_PROTO_MASK),
285 	MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
286 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
287 		       MVPP2_PRS_RI_L3_IP4_OPT,
288 		       MVPP2_PRS_RI_L3_PROTO_MASK),
289 	MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
290 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
291 		       MVPP2_PRS_RI_L3_IP4_OTHER,
292 		       MVPP2_PRS_RI_L3_PROTO_MASK),
293 
294 	/* IPv6 flows, no vlan tag */
295 	MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
296 		       MVPP22_CLS_HEK_IP6_2T,
297 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
298 		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
299 	MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
300 		       MVPP22_CLS_HEK_IP6_2T,
301 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
302 		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
303 
304 	/* IPv6 flows, with vlan tag */
305 	MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
306 		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
307 		       MVPP2_PRS_RI_L3_IP6,
308 		       MVPP2_PRS_RI_L3_PROTO_MASK),
309 	MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
310 		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
311 		       MVPP2_PRS_RI_L3_IP6,
312 		       MVPP2_PRS_RI_L3_PROTO_MASK),
313 
314 	/* Non IP flow, no vlan tag */
315 	MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG,
316 		       0,
317 		       MVPP2_PRS_RI_VLAN_NONE,
318 		       MVPP2_PRS_RI_VLAN_MASK),
319 	/* Non IP flow, with vlan tag */
320 	MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG,
321 		       MVPP22_CLS_HEK_OPT_VLAN,
322 		       0, 0),
323 };
324 
/* Read the hit counter of one flow table entry. The counter index register
 * must be written first to select which entry's counter is read back.
 */
u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);

	return mvpp2_read(priv, MVPP2_CLS_FLOW_TBL_HIT_CTR);
}
331 
/* Read one classification flow table entry into @fe. Selecting the index
 * via MVPP2_CLS_FLOW_INDEX_REG must precede the three data-word reads.
 */
void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
			 struct mvpp2_cls_flow_entry *fe)
{
	fe->index = index;
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index);
	fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG);
	fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG);
	fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG);
}
341 
/* Update classification flow table registers: select the entry index, then
 * write the three data words of @fe back to hardware.
 */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG,  fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG,  fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG,  fe->data[2]);
}
351 
/* Read the hit counter of one lookup (decode) table entry, selected through
 * the shared counter index register.
 */
u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);

	return mvpp2_read(priv, MVPP2_CLS_DEC_TBL_HIT_CTR);
}
358 
/* Read one lookup ID table entry into @le. The entry is addressed by the
 * (way, lkpid) pair encoded into the lookup index register.
 */
void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way,
			   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	/* Select the entry before reading its data word */
	val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	le->way = way;
	le->lkpid = lkpid;
	le->data = mvpp2_read(priv, MVPP2_CLS_LKP_TBL_REG);
}
370 
/* Update classification lookup table register: select the entry by its
 * (way, lkpid) pair, then write its data word.
 */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}
381 
382 /* Operations on flow entry */
383 static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe)
384 {
385 	return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
386 }
387 
388 static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe,
389 				       int num_of_fields)
390 {
391 	fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
392 	fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields);
393 }
394 
395 static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe,
396 				  int field_index)
397 {
398 	return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) &
399 		MVPP2_CLS_FLOW_TBL2_FLD_MASK;
400 }
401 
402 static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe,
403 				   int field_index, int field_id)
404 {
405 	fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index,
406 						MVPP2_CLS_FLOW_TBL2_FLD_MASK);
407 	fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id);
408 }
409 
410 static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe,
411 				   int engine)
412 {
413 	fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK);
414 	fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine);
415 }
416 
417 int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe)
418 {
419 	return (fe->data[0] >> MVPP2_CLS_FLOW_TBL0_OFFS) &
420 		MVPP2_CLS_FLOW_TBL0_ENG_MASK;
421 }
422 
423 static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe,
424 				       bool from_packet)
425 {
426 	if (from_packet)
427 		fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
428 	else
429 		fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
430 }
431 
432 static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
433 				    bool is_last)
434 {
435 	fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST;
436 	fe->data[0] |= !!is_last;
437 }
438 
439 static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio)
440 {
441 	fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK);
442 	fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio);
443 }
444 
/* Add @port (a bitmask, e.g. BIT(port_id)) to the entry's port bitmap.
 * Existing ports in the bitmap are preserved.
 */
static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
				    u32 port)
{
	fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
}
450 
451 static void mvpp2_cls_flow_lu_type_set(struct mvpp2_cls_flow_entry *fe,
452 				       u8 lu_type)
453 {
454 	fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK);
455 	fe->data[1] |= MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu_type);
456 }
457 
/* Initialize the parser entry for the given flow: install the flow's
 * result-info value/mask so the parser steers matching packets to flow_id.
 */
static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
				    const struct mvpp2_cls_flow *flow)
{
	mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
			   flow->prs_ri.ri_mask);
}
465 
466 /* Initialize the Lookup Id table entry for the given flow */
467 static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
468 				    const struct mvpp2_cls_flow *flow)
469 {
470 	struct mvpp2_cls_lookup_entry le;
471 
472 	le.way = 0;
473 	le.lkpid = flow->flow_id;
474 
475 	/* The default RxQ for this port is set in the C2 lookup */
476 	le.data = 0;
477 
478 	/* We point on the first lookup in the sequence for the flow, that is
479 	 * the C2 lookup.
480 	 */
481 	le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_CLS_FLT_FIRST(flow->flow_id));
482 
483 	/* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
484 	le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
485 
486 	mvpp2_cls_lookup_write(priv, &le);
487 }
488 
/* Write a whole C2 TCAM entry (validity, action, attributes, match data)
 * to hardware. Register order matters: everything is staged first, then the
 * final TCAM_DATA4 write commits the staged TCAM words and the INV bit.
 */
static void mvpp2_cls_c2_write(struct mvpp2 *priv,
			       struct mvpp2_cls_c2_entry *c2)
{
	u32 val;
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);

	/* Stage the (in)validity bit for this entry */
	val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
	if (c2->valid)
		val &= ~MVPP22_CLS_C2_TCAM_INV_BIT;
	else
		val |= MVPP22_CLS_C2_TCAM_INV_BIT;
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_INV, val);

	mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);

	mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);

	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
	/* Writing TCAM_DATA4 flushes writes to TCAM_DATA0-4 and INV to HW */
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
}
516 
/* Read a whole C2 TCAM entry (match data, action, attributes, validity)
 * from hardware into @c2, after selecting its index.
 */
void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
		       struct mvpp2_cls_c2_entry *c2)
{
	u32 val;
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);

	c2->index = index;

	c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
	c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
	c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
	c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
	c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);

	c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);

	c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
	c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
	c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
	c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);

	/* The INV register holds the inverse sense: bit set means invalid */
	val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
	c2->valid = !(val & MVPP22_CLS_C2_TCAM_INV_BIT);
}
541 
/* Initialize the flow table entries for the given flow: reset the whole
 * per-flow range with increasing priorities, then configure the shared C2
 * lookup entry and one C3Hx hash lookup entry per port.
 */
static void mvpp2_cls_flow_init(struct mvpp2 *priv,
				const struct mvpp2_cls_flow *flow)
{
	struct mvpp2_cls_flow_entry fe;
	int i, pri = 0;

	/* Assign default values to all entries in the flow */
	for (i = MVPP2_CLS_FLT_FIRST(flow->flow_id);
	     i <= MVPP2_CLS_FLT_LAST(flow->flow_id); i++) {
		memset(&fe, 0, sizeof(fe));
		fe.index = i;
		mvpp2_cls_flow_pri_set(&fe, pri++);

		/* Terminate the lookup sequence at the last entry */
		if (i == MVPP2_CLS_FLT_LAST(flow->flow_id))
			mvpp2_cls_flow_last_set(&fe, 1);

		mvpp2_cls_flow_write(priv, &fe);
	}

	/* RSS config C2 lookup */
	mvpp2_cls_flow_read(priv, MVPP2_CLS_FLT_C2_RSS_ENTRY(flow->flow_id),
			    &fe);

	mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
	mvpp2_cls_flow_port_id_sel(&fe, true);
	mvpp2_cls_flow_lu_type_set(&fe, MVPP2_CLS_LU_ALL);

	/* Add all ports */
	for (i = 0; i < MVPP2_MAX_PORTS; i++)
		mvpp2_cls_flow_port_add(&fe, BIT(i));

	mvpp2_cls_flow_write(priv, &fe);

	/* C3Hx lookups */
	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
		mvpp2_cls_flow_read(priv,
				    MVPP2_CLS_FLT_HASH_ENTRY(i, flow->flow_id),
				    &fe);

		/* Set a default engine. Will be overwritten when setting the
		 * real HEK parameters
		 */
		mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C3HA);
		mvpp2_cls_flow_port_id_sel(&fe, true);
		mvpp2_cls_flow_port_add(&fe, BIT(i));

		mvpp2_cls_flow_write(priv, &fe);
	}
}
592 
593 /* Adds a field to the Header Extracted Key generation parameters*/
594 static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe,
595 				    u32 field_id)
596 {
597 	int nb_fields = mvpp2_cls_flow_hek_num_get(fe);
598 
599 	if (nb_fields == MVPP2_FLOW_N_FIELDS)
600 		return -EINVAL;
601 
602 	mvpp2_cls_flow_hek_set(fe, nb_fields, field_id);
603 
604 	mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1);
605 
606 	return 0;
607 }
608 
609 static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
610 				     unsigned long hash_opts)
611 {
612 	u32 field_id;
613 	int i;
614 
615 	/* Clear old fields */
616 	mvpp2_cls_flow_hek_num_set(fe, 0);
617 	fe->data[2] = 0;
618 
619 	for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
620 		switch (BIT(i)) {
621 		case MVPP22_CLS_HEK_OPT_MAC_DA:
622 			field_id = MVPP22_CLS_FIELD_MAC_DA;
623 			break;
624 		case MVPP22_CLS_HEK_OPT_VLAN:
625 			field_id = MVPP22_CLS_FIELD_VLAN;
626 			break;
627 		case MVPP22_CLS_HEK_OPT_IP4SA:
628 			field_id = MVPP22_CLS_FIELD_IP4SA;
629 			break;
630 		case MVPP22_CLS_HEK_OPT_IP4DA:
631 			field_id = MVPP22_CLS_FIELD_IP4DA;
632 			break;
633 		case MVPP22_CLS_HEK_OPT_IP6SA:
634 			field_id = MVPP22_CLS_FIELD_IP6SA;
635 			break;
636 		case MVPP22_CLS_HEK_OPT_IP6DA:
637 			field_id = MVPP22_CLS_FIELD_IP6DA;
638 			break;
639 		case MVPP22_CLS_HEK_OPT_L4SIP:
640 			field_id = MVPP22_CLS_FIELD_L4SIP;
641 			break;
642 		case MVPP22_CLS_HEK_OPT_L4DIP:
643 			field_id = MVPP22_CLS_FIELD_L4DIP;
644 			break;
645 		default:
646 			return -EINVAL;
647 		}
648 		if (mvpp2_flow_add_hek_field(fe, field_id))
649 			return -EINVAL;
650 	}
651 
652 	return 0;
653 }
654 
655 const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
656 {
657 	if (flow >= MVPP2_N_PRS_FLOWS)
658 		return NULL;
659 
660 	return &cls_flows[flow];
661 }
662 
/* Set the hash generation options for the given traffic flow.
 * One traffic flow (in the ethtool sense) has multiple classification flows,
 * to handle specific cases such as fragmentation, or the presence of a
 * VLAN / DSA Tag.
 *
 * Each of these individual flows has different constraints, for example we
 * can't hash fragmented packets on L4 data (else we would risk having packet
 * re-ordering), so each classification flows masks the options with their
 * supported ones.
 *
 * Returns 0 on success, -EINVAL on an unknown flow id or an unsupported
 * combination of hash fields.
 */
static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
					u16 requested_opts)
{
	const struct mvpp2_cls_flow *flow;
	struct mvpp2_cls_flow_entry fe;
	int i, engine, flow_index;
	u16 hash_opts;

	for_each_cls_flow_id_with_type(i, flow_type) {
		flow = mvpp2_cls_flow_get(i);
		if (!flow)
			return -EINVAL;

		/* Operate on this port's C3Hx hash lookup entry for the flow */
		flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);

		mvpp2_cls_flow_read(port->priv, flow_index, &fe);

		hash_opts = flow->supported_hash_opts & requested_opts;

		/* Use C3HB engine to access L4 infos. This adds L4 infos to the
		 * hash parameters
		 */
		if (hash_opts & MVPP22_CLS_HEK_L4_OPTS)
			engine = MVPP22_CLS_ENGINE_C3HB;
		else
			engine = MVPP22_CLS_ENGINE_C3HA;

		if (mvpp2_flow_set_hek_fields(&fe, hash_opts))
			return -EINVAL;

		mvpp2_cls_flow_eng_set(&fe, engine);

		mvpp2_cls_flow_write(port->priv, &fe);
	}

	return 0;
}
711 
712 u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
713 {
714 	u16 hash_opts = 0;
715 	int n_fields, i, field;
716 
717 	n_fields = mvpp2_cls_flow_hek_num_get(fe);
718 
719 	for (i = 0; i < n_fields; i++) {
720 		field = mvpp2_cls_flow_hek_get(fe, i);
721 
722 		switch (field) {
723 		case MVPP22_CLS_FIELD_MAC_DA:
724 			hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
725 			break;
726 		case MVPP22_CLS_FIELD_VLAN:
727 			hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
728 			break;
729 		case MVPP22_CLS_FIELD_L3_PROTO:
730 			hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
731 			break;
732 		case MVPP22_CLS_FIELD_IP4SA:
733 			hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA;
734 			break;
735 		case MVPP22_CLS_FIELD_IP4DA:
736 			hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA;
737 			break;
738 		case MVPP22_CLS_FIELD_IP6SA:
739 			hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA;
740 			break;
741 		case MVPP22_CLS_FIELD_IP6DA:
742 			hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA;
743 			break;
744 		case MVPP22_CLS_FIELD_L4SIP:
745 			hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
746 			break;
747 		case MVPP22_CLS_FIELD_L4DIP:
748 			hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
749 			break;
750 		default:
751 			break;
752 		}
753 	}
754 	return hash_opts;
755 }
756 
/* Returns the hash opts for this flow. There are several classifier flows
 * for one traffic flow, this returns an aggregation of all configurations.
 * Returns 0 if any of the flow ids is unknown.
 */
static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
{
	const struct mvpp2_cls_flow *flow;
	struct mvpp2_cls_flow_entry fe;
	int i, flow_index;
	u16 hash_opts = 0;

	for_each_cls_flow_id_with_type(i, flow_type) {
		flow = mvpp2_cls_flow_get(i);
		if (!flow)
			return 0;

		/* Read this port's hash lookup entry for the flow */
		flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);

		mvpp2_cls_flow_read(port->priv, flow_index, &fe);

		hash_opts |= mvpp2_flow_get_hek_fields(&fe);
	}

	return hash_opts;
}
781 
782 static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
783 {
784 	const struct mvpp2_cls_flow *flow;
785 	int i;
786 
787 	for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
788 		flow = mvpp2_cls_flow_get(i);
789 		if (!flow)
790 			break;
791 
792 		mvpp2_cls_flow_prs_init(priv, flow);
793 		mvpp2_cls_flow_lkp_init(priv, flow);
794 		mvpp2_cls_flow_init(priv, flow);
795 	}
796 }
797 
/* Install the per-port C2 TCAM entry that matches all of the port's traffic
 * (by port bitmap and lookup type), enables RSS and sets the default RxQ.
 */
static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
{
	struct mvpp2_cls_c2_entry c2;
	u8 qh, ql, pmap;

	memset(&c2, 0, sizeof(c2));

	c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id);

	/* Match only packets from this port */
	pmap = BIT(port->id);
	c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
	c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));

	/* Match on Lookup Type */
	c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
	c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_ALL);

	/* Update RSS status after matching this entry */
	c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);

	/* Mark packet as "forwarded to software", needed for RSS */
	c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);

	/* Configure the default rx queue : Update Queue Low and Queue High, but
	 * don't lock, since the rx queue selection might be overridden by RSS
	 */
	c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) |
		   MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD);

	/* Split first_rxq into its high and low queue-number parts */
	qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
	ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;

	c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
		      MVPP22_CLS_C2_ATTR0_QLOW(ql);

	c2.valid = true;

	mvpp2_cls_c2_write(port->priv, &c2);
}
837 
/* Classifier default initialization: enable the classifier, wipe the flow,
 * lookup and C2 tables, then program all flows described in cls_flows.
 */
void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	struct mvpp2_cls_c2_entry c2;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table (both ways of every lkpid) */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}

	/* Clear C2 TCAM engine table */
	memset(&c2, 0, sizeof(c2));
	c2.valid = false;
	for (index = 0; index < MVPP22_CLS_C2_N_ENTRIES; index++) {
		c2.index = index;
		mvpp2_cls_c2_write(priv, &c2);
	}

	mvpp2_cls_port_init_flows(priv);
}
877 
/* Per-port classifier configuration: select the lookup way, program the
 * port's lookup ID entry with its first RxQ, and install the C2 RSS entry.
 */
void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);

	mvpp2_port_c2_cls_init(port);
}
907 
908 u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index)
909 {
910 	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2_index);
911 
912 	return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR);
913 }
914 
915 static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port)
916 {
917 	struct mvpp2_cls_c2_entry c2;
918 
919 	mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
920 
921 	c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
922 
923 	mvpp2_cls_c2_write(port->priv, &c2);
924 }
925 
926 static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
927 {
928 	struct mvpp2_cls_c2_entry c2;
929 
930 	mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
931 
932 	c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;
933 
934 	mvpp2_cls_c2_write(port->priv, &c2);
935 }
936 
/* Enable RSS on a port by flipping the RSS attribute in its C2 entry */
void mvpp22_port_rss_enable(struct mvpp2_port *port)
{
	mvpp2_rss_port_c2_enable(port);
}
941 
/* Disable RSS on a port by clearing the RSS attribute in its C2 entry */
void mvpp22_port_rss_disable(struct mvpp2_port *port)
{
	mvpp2_rss_port_c2_disable(port);
}
946 
947 /* Set CPU queue number for oversize packets */
948 void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
949 {
950 	u32 val;
951 
952 	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
953 		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
954 
955 	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
956 		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
957 
958 	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
959 	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
960 	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
961 }
962 
963 static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
964 {
965 	int nrxqs, cpu, cpus = num_possible_cpus();
966 
967 	/* Number of RXQs per CPU */
968 	nrxqs = port->nrxqs / cpus;
969 
970 	/* CPU that will handle this rx queue */
971 	cpu = rxq / nrxqs;
972 
973 	if (!cpu_online(cpu))
974 		return port->first_rxq;
975 
976 	/* Indirection to better distribute the paquets on the CPUs when
977 	 * configuring the RSS queues.
978 	 */
979 	return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
980 }
981 
982 void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table)
983 {
984 	struct mvpp2 *priv = port->priv;
985 	int i;
986 
987 	for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
988 		u32 sel = MVPP22_RSS_INDEX_TABLE(table) |
989 			  MVPP22_RSS_INDEX_TABLE_ENTRY(i);
990 		mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
991 
992 		mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
993 			    mvpp22_rxfh_indir(port, port->indir[i]));
994 	}
995 }
996 
997 int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
998 {
999 	u16 hash_opts = 0;
1000 
1001 	switch (info->flow_type) {
1002 	case TCP_V4_FLOW:
1003 	case UDP_V4_FLOW:
1004 	case TCP_V6_FLOW:
1005 	case UDP_V6_FLOW:
1006 		if (info->data & RXH_L4_B_0_1)
1007 			hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
1008 		if (info->data & RXH_L4_B_2_3)
1009 			hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
1010 		/* Fallthrough */
1011 	case IPV4_FLOW:
1012 	case IPV6_FLOW:
1013 		if (info->data & RXH_L2DA)
1014 			hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
1015 		if (info->data & RXH_VLAN)
1016 			hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
1017 		if (info->data & RXH_L3_PROTO)
1018 			hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
1019 		if (info->data & RXH_IP_SRC)
1020 			hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA |
1021 				     MVPP22_CLS_HEK_OPT_IP6SA);
1022 		if (info->data & RXH_IP_DST)
1023 			hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA |
1024 				     MVPP22_CLS_HEK_OPT_IP6DA);
1025 		break;
1026 	default: return -EOPNOTSUPP;
1027 	}
1028 
1029 	return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts);
1030 }
1031 
1032 int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
1033 {
1034 	unsigned long hash_opts;
1035 	int i;
1036 
1037 	hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type);
1038 	info->data = 0;
1039 
1040 	for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
1041 		switch (BIT(i)) {
1042 		case MVPP22_CLS_HEK_OPT_MAC_DA:
1043 			info->data |= RXH_L2DA;
1044 			break;
1045 		case MVPP22_CLS_HEK_OPT_VLAN:
1046 			info->data |= RXH_VLAN;
1047 			break;
1048 		case MVPP22_CLS_HEK_OPT_L3_PROTO:
1049 			info->data |= RXH_L3_PROTO;
1050 			break;
1051 		case MVPP22_CLS_HEK_OPT_IP4SA:
1052 		case MVPP22_CLS_HEK_OPT_IP6SA:
1053 			info->data |= RXH_IP_SRC;
1054 			break;
1055 		case MVPP22_CLS_HEK_OPT_IP4DA:
1056 		case MVPP22_CLS_HEK_OPT_IP6DA:
1057 			info->data |= RXH_IP_DST;
1058 			break;
1059 		case MVPP22_CLS_HEK_OPT_L4SIP:
1060 			info->data |= RXH_L4_B_0_1;
1061 			break;
1062 		case MVPP22_CLS_HEK_OPT_L4DIP:
1063 			info->data |= RXH_L4_B_2_3;
1064 			break;
1065 		default:
1066 			return -EINVAL;
1067 		}
1068 	}
1069 	return 0;
1070 }
1071 
/* Default RSS setup for a port: size the RSS table, bind the port's
 * default RXQ to its per-port table, fill the table with an even
 * queue distribution, and install the default per-flow hash fields.
 */
void mvpp22_port_rss_init(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	int i;

	/* Set the table width: replace the whole classifier Rx queue number
	 * with the ones configured in RSS table entries.
	 */
	mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id));
	mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);

	/* The default RxQ is used as a key to select the RSS table to use.
	 * We use one RSS table per port.
	 */
	mvpp2_write(priv, MVPP22_RSS_INDEX,
		    MVPP22_RSS_INDEX_QUEUE(port->first_rxq));
	mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE,
		    MVPP22_RSS_TABLE_POINTER(port->id));

	/* Configure the first table to evenly distribute the packets across
	 * real Rx Queues. The table entries map a hash to a port Rx Queue.
	 */
	for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
		port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);

	mvpp22_rss_fill_table(port, port->id);

	/* Configure default flows: 2-tuple (IP src/dst) hashing for plain
	 * IPv4/IPv6, 5-tuple (plus L4 ports) for TCP/UDP.
	 */
	mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T);
	mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T);
	mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
	mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
	mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
	mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
}
1107