1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Header Parser helpers for Marvell PPv2 Network Controller
4  *
5  * Copyright (C) 2014 Marvell
6  *
7  * Marcin Wojtas <mw@semihalf.com>
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/netdevice.h>
12 #include <linux/etherdevice.h>
13 #include <linux/platform_device.h>
14 #include <uapi/linux/ppp_defs.h>
15 #include <net/ip.h>
16 #include <net/ipv6.h>
17 
18 #include "mvpp2.h"
19 #include "mvpp2_prs.h"
20 
21 /* Update parser tcam and sram hw entries */
/* Update parser tcam and sram hw entries
 *
 * Copies the software entry @pe into the hardware TCAM/SRAM slot
 * @pe->index using the indirect index/data register interface.
 * Returns 0 on success, -EINVAL when the index is out of range.
 */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit so the entry becomes live in hw */
	pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);

	return 0;
}
44 
45 /* Initialize tcam entry from hw */
/* Initialize tcam entry from hw
 *
 * Reads the TCAM/SRAM entry at index @tid into @pe.  Returns 0 on
 * success, -EINVAL for an out-of-range @tid, or
 * MVPP2_PRS_TCAM_ENTRY_INVALID when the hardware entry is marked
 * invalid (in which case only the invalidation word is filled in).
 */
int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
			   int tid)
{
	int i;

	if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	memset(pe, 0, sizeof(*pe));
	pe->index = tid;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	/* Check the invalidation bit first to avoid reading a dead entry */
	pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
75 
76 /* Invalidate tcam hw entry */
/* Invalidate tcam hw entry
 *
 * Sets the invalidation bit of the hardware TCAM entry at @index,
 * disabling it without touching the rest of the entry data.
 */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}
84 
85 /* Enable shadow table entry and set its lookup ID */
86 static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
87 {
88 	priv->prs_shadow[index].valid = true;
89 	priv->prs_shadow[index].lu = lu;
90 }
91 
92 /* Update ri fields in shadow table entry */
93 static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
94 				    unsigned int ri, unsigned int ri_mask)
95 {
96 	priv->prs_shadow[index].ri_mask = ri_mask;
97 	priv->prs_shadow[index].ri = ri;
98 }
99 
100 /* Update lookup field in tcam sw entry */
101 static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
102 {
103 	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK);
104 	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
105 	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK);
106 	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
107 }
108 
109 /* Update mask for single port in tcam sw entry */
110 static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
111 				    unsigned int port, bool add)
112 {
113 	if (add)
114 		pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port));
115 	else
116 		pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port));
117 }
118 
119 /* Update port map in tcam sw entry */
120 static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
121 					unsigned int ports)
122 {
123 	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK);
124 	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK);
125 	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK);
126 }
127 
128 /* Obtain port map from tcam sw entry */
129 unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
130 {
131 	return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK;
132 }
133 
134 /* Set byte of data and its enable bits in tcam sw entry */
/* Set byte of data and its enable bits in tcam sw entry
 *
 * @offs is a byte offset into the TCAM match data; the byte lands in
 * word MVPP2_PRS_BYTE_TO_WORD(offs) at bit position @pos, and its
 * per-bit @enable mask goes into the enable half of the same word via
 * MVPP2_PRS_TCAM_EN().
 */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;

	/* Clear old data byte and its enable bits before setting new ones.
	 * NOTE(review): clearing uses EN(0xff) << pos while setting uses
	 * EN(enable << pos) - equivalent only if MVPP2_PRS_TCAM_EN is a
	 * pure shift; looks intentional, confirm against mvpp2_prs.h.
	 */
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos);
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos);
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos;
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos);
}
146 
147 /* Get byte of data and its enable bits from tcam sw entry */
/* Get byte of data and its enable bits from tcam sw entry
 *
 * Inverse of mvpp2_prs_tcam_data_byte_set(): extracts the data byte at
 * byte offset @offs into *@byte and its enable mask into *@enable.
 * The enable bits live 16 bits above the data bits in the same word
 * (hard-coded +16 here; presumably matches MVPP2_PRS_TCAM_EN's shift -
 * verify against the header).
 */
void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
				  unsigned int offs, unsigned char *byte,
				  unsigned char *enable)
{
	int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;

	*byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff;
	*enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff;
}
157 
158 /* Compare tcam data bytes with a pattern */
159 static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
160 				    u16 data)
161 {
162 	u16 tcam_data;
163 
164 	tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff;
165 	return tcam_data == data;
166 }
167 
168 /* Update ai bits in tcam sw entry */
/* Update ai bits in tcam sw entry
 *
 * For every bit selected by @enable, copy the corresponding bit from
 * @bits into the AI field, then turn on the matching AI enable bits.
 * Note the enable bits are only ever OR'd in, never cleared.
 */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		/* Bits outside the enable mask are left untouched */
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i);
		else
			pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i);
	}

	pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable);
}
186 
187 /* Get ai bits from tcam sw entry */
188 static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
189 {
190 	return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
191 }
192 
193 /* Set ethertype in tcam sw entry */
/* Match an ethertype at byte offset @offset in the tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	unsigned char hi = ethertype >> 8;
	unsigned char lo = ethertype & 0xff;

	/* Ethertype is big-endian on the wire: high byte first */
	mvpp2_prs_tcam_data_byte_set(pe, offset, hi, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, lo, 0xff);
}
200 
201 /* Set vid in tcam sw entry */
/* Match a 12-bit VLAN ID at byte offset @offset in the tcam sw entry */
static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
				unsigned short vid)
{
	unsigned char hi = (vid & 0xf00) >> 8;
	unsigned char lo = vid & 0xff;

	/* Only the low nibble of the first byte belongs to the VID */
	mvpp2_prs_tcam_data_byte_set(pe, offset, hi, 0xf);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, lo, 0xff);
}
208 
209 /* Set bits in sram sw entry */
/* Set bits in sram sw entry
 *
 * ORs @val into the sram word array starting at absolute bit @bit_num.
 * Note: @val must fit within one 32-bit word at that position; there is
 * no carry into the next word.
 */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    u32 val)
{
	pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num)));
}
215 
216 /* Clear bits in sram sw entry */
/* Clear bits in sram sw entry
 *
 * Clears the bits of @val in the sram word array starting at absolute
 * bit @bit_num.  Like mvpp2_prs_sram_bits_set(), the mask must not
 * cross a 32-bit word boundary.
 */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      u32 val)
{
	pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num)));
}
222 
223 /* Update ri bits in sram sw entry */
/* Update ri bits in sram sw entry
 *
 * For every bit selected by @mask, copy the matching bit of @bits into
 * the result-info field and set the corresponding RI control bit so the
 * hardware applies it.
 */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		/* Untouched bits keep their previous value and control bit */
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
						1);
		else
			mvpp2_prs_sram_bits_clear(pe,
						  MVPP2_PRS_SRAM_RI_OFFS + i,
						  1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}
244 
245 /* Obtain ri bits from sram sw entry */
/* Obtain ri bits from sram sw entry
 *
 * Returns the whole 32-bit result-info word; callers mask out the
 * fields they care about.
 */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
}
250 
251 /* Update ai bits in sram sw entry */
/* Update ai bits in sram sw entry
 *
 * For every bit selected by @mask, copy the matching bit of @bits into
 * the AI field and set the corresponding AI control bit so the hardware
 * applies it.
 */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		/* Untouched bits keep their previous value and control bit */
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
						1);
		else
			mvpp2_prs_sram_bits_clear(pe,
						  MVPP2_PRS_SRAM_AI_OFFS + i,
						  1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}
272 
273 /* Read ai bits from sram sw entry */
/* Read ai bits from sram sw entry
 *
 * Returns the 8 AI bits as a byte.  They straddle a 32-bit word
 * boundary, so the value is assembled from the tail of one word and the
 * head of the next.
 */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	/* ai is stored on bits 90->97; so it spreads across two u32 */
	int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);

	/* Low part from word ai_off, high part from the following word;
	 * truncation to u8 keeps only the 8 AI bits.
	 */
	bits = (pe->sram[ai_off] >> ai_shift) |
	       (pe->sram[ai_off + 1] << (32 - ai_shift));

	return bits;
}
286 
/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
290 static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
291 				       unsigned int lu)
292 {
293 	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
294 
295 	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
296 				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
297 	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
298 }
299 
300 /* In the sram sw entry set sign and value of the next lookup offset
301  * and the offset value generated to the classifier
302  */
/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 *
 * @shift may be negative; the sign bit and magnitude are stored
 * separately.  @op selects the shift operation applied by hardware.
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value.
	 * NOTE(review): the magnitude is OR'd in without clearing the old
	 * shift field; callers appear to start from zeroed entries -
	 * confirm before reusing an entry with a different shift.
	 */
	pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
		shift & MVPP2_PRS_SRAM_SHIFT_MASK;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
326 
327 /* In the sram sw entry set sign and value of the user defined offset
328  * generated to the classifier
329  */
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 *
 * @type selects which offset (e.g. L3/L4) this UDF describes, @offset
 * may be negative (sign stored separately), and @op selects the offset
 * operation applied by hardware.
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				offset & MVPP2_PRS_SRAM_UDF_MASK);

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
362 
363 /* Find parser flow entry */
/* Find parser flow entry
 *
 * Scans the shadow table from the top downward for a valid FLOWS
 * entry whose SRAM AI field matches @flow.  Returns the tid on match,
 * -ENOENT otherwise.
 */
static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through the all entires with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);
		bits = mvpp2_prs_sram_ai_get(&pe);

		/* Sram store classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return tid;
	}

	return -ENOENT;
}
387 
388 /* Return first free tcam index, seeking from start to end */
389 static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
390 				     unsigned char end)
391 {
392 	int tid;
393 
394 	if (start > end)
395 		swap(start, end);
396 
397 	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
398 		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
399 
400 	for (tid = start; tid <= end; tid++) {
401 		if (!priv->prs_shadow[tid].valid)
402 			return tid;
403 	}
404 
405 	return -EINVAL;
406 }
407 
408 /* Drop flow control pause frames */
/* Drop flow control pause frames
 *
 * Installs a MAC-lookup entry matching the 802.3x pause-frame
 * destination address 01:80:C2:00:00:01 on all ports, with the DROP
 * result-info bit set.
 */
static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
{
	unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
	struct mvpp2_prs_entry pe;
	unsigned int len;

	memset(&pe, 0, sizeof(pe));

	/* For all ports - drop flow control frames */
	pe.index = MVPP2_PE_FC_DROP;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Set match on DA - all six bytes must match exactly */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);

	/* Finished: terminate lookup via flowid generation */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);
}
439 
440 /* Enable/disable dropping all mac da's */
/* Enable/disable dropping all mac da's
 *
 * Adds (@add true) or removes @port from the shared drop-all entry.
 * The entry is created on first use with all ports masked, then the
 * per-port mask is updated.
 */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exist - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
473 
474 /* Set port to unicast or multicast promiscuous mode */
/* Set port to unicast or multicast promiscuous mode
 *
 * Adds (@add true) or removes @port from the shared UC or MC
 * promiscuous entry selected by @l2_cast.  The entry is created on
 * first use, matching the UC/MC bit of the DA and tagging packets with
 * the corresponding L2 cast result-info.
 */
void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
			       enum mvpp2_prs_l2_cast l2_cast, bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char cast_match;
	unsigned int ri;
	int tid;

	if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
		cast_match = MVPP2_PRS_UCAST_VAL;
		tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_UCAST;
	} else {
		cast_match = MVPP2_PRS_MCAST_VAL;
		tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_MCAST;
	}

	/* promiscuous mode - Accept unknown unicast or multicast packets */
	if (priv->prs_shadow[tid].valid) {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = tid;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);

		/* Match UC or MC addresses */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
					     MVPP2_PRS_CAST_MASK);

		/* Shift to ethertype - skip DA and SA (2 * ETH_ALEN bytes) */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
527 
528 /* Set entry for dsa packets */
/* Set entry for dsa packets
 *
 * Adds (@add true) or removes @port from the DSA/EDSA entry selected
 * by @tagged/@extend.  The entry is created on first use: tagged
 * packets proceed to VID filtering marked as single-vlan; untagged
 * packets skip the 4-byte DSA (or 8-byte EDSA) tag and continue at L2.
 */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exist - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
					     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
					     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);

			/* Set ai bits for next iteration - ai=1 flags EDSA */
			if (extend)
				mvpp2_prs_sram_ai_update(&pe, 1,
							MVPP2_PRS_SRAM_AI_MASK);
			else
				mvpp2_prs_sram_ai_update(&pe, 0,
							MVPP2_PRS_SRAM_AI_MASK);

			/* Set result info bits to 'single vlan' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
			/* If packet is tagged continue check vid filtering */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
		} else {
			/* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/
			mvpp2_prs_sram_shift_set(&pe, shift,
					MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
594 
595 /* Set entry for dsa ethertype */
/* Set entry for dsa ethertype
 *
 * Like mvpp2_prs_dsa_tag_set(), but for DSA/EDSA tags preceded by the
 * EDSA ethertype.  Adds or removes @port from the entry selected by
 * @tagged/@extend; the entry is created on first use.  Note the EDSA
 * variant starts with all ports masked while the DSA variant starts
 * with all ports enabled (see @port_mask).
 */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exist - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype - EDSA etype followed by 2 zero bytes */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 byte reserved + tag*/
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						     MVPP2_ETH_TYPE_LEN + 2 + 3,
						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
662 
663 /* Search for existing single/triple vlan entry */
/* Search for existing single/triple vlan entry
 *
 * Scans the free-tid range for a valid VLAN entry matching TPID @tpid
 * and AI value @ai (with the double-vlan flag ignored), whose
 * result-info marks it single or triple vlan.  Returns the tid on
 * match, -ENOENT otherwise.
 */
static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through the all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);
		match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(&pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(&pe);
		/* Clear double vlan bit */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return tid;
	}

	return -ENOENT;
}
703 
704 /* Add/update single/triple vlan entry */
/* Add/update single/triple vlan entry
 *
 * If an entry for (@tpid, @ai) exists, only its port map is updated to
 * @port_map.  Otherwise a new entry is allocated from the top of the
 * free range, placed above the last double-vlan entry (single/triple
 * entries must follow double entries in lookup order), and programmed.
 * Returns 0 on success or a negative errno.
 */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry pe;
	int tid_aux, tid;
	int ret = 0;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_vlan_find(priv, tpid, ai);

	if (tid < 0) {
		/* Create new tcam entry - search downward from the top */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		/* New entry must come after the last double-vlan entry */
		if (tid <= tid_aux)
			return -EINVAL;

		memset(&pe, 0, sizeof(pe));
		pe.index = tid;
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);

		mvpp2_prs_match_etype(&pe, 0, tpid);

		/* VLAN tag detected, proceed with VID filtering */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}
	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);

	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}
775 
776 /* Get first free double vlan ai number */
777 static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
778 {
779 	int i;
780 
781 	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
782 		if (!priv->prs_double_vlans[i])
783 			return i;
784 	}
785 
786 	return -EINVAL;
787 }
788 
789 /* Search for existing double vlan entry */
/* Search for existing double vlan entry
 *
 * Scans the free-tid range for a valid VLAN entry matching the outer
 * TPID @tpid1 at offset 0 and inner TPID @tpid2 at offset 4, whose
 * result-info marks it double vlan.  Returns the tid on match,
 * -ENOENT otherwise.
 */
static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
				      unsigned short tpid2)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through the all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
			mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);

		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return tid;
	}

	return -ENOENT;
}
821 
822 /* Add or update double vlan entry */
/* Add or update double vlan entry
 *
 * If an entry for (@tpid1, @tpid2) exists, only its port map is
 * updated to @port_map.  Otherwise a new entry is allocated from the
 * bottom of the free range, placed before the first single/triple
 * entry (double entries must precede them in lookup order), given a
 * fresh double-vlan ai number, and programmed to skip the outer tag
 * and re-run the VLAN lookup.  Returns 0 or a negative errno.
 */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	int tid_aux, tid, ai, ret = 0;
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (tid < 0) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
				MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0)
			return ai;

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		/* New entry must come before the first single/triple entry */
		if (tid >= tid_aux)
			return -ERANGE;

		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		pe.index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(&pe, 0, tpid1);
		mvpp2_prs_match_etype(&pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		/* Shift 4 bytes - skip outer vlan tag */
		mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);
	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}
895 
896 /* IPv4 header parsing for fragmentation and L4 offset */
/* IPv4 header parsing for fragmentation and L4 offset
 *
 * Installs two IP4-lookup entries for L4 protocol @proto (TCP, UDP or
 * IGMP only): one matching non-fragmented packets (frag flags/offset
 * bytes all zero) tagged with @ri/@ri_mask, and one matching any
 * fragment tagged additionally with MVPP2_PRS_RI_IP_FRAG_TRUE.
 * Returns 0 or a negative errno.
 */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Not fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	/* Set L4 offset relative to the current position in the header */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	/* Match zero fragment flags/offset bytes => not fragmented */
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Fragmented packet - reuses the sw entry built above */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	/* Clear ri before updating */
	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	/* Stop matching the fragment bytes - any value is a fragment */
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
967 
968 /* IPv4 L3 multicast or broadcast */
/* IPv4 L3 multicast or broadcast
 *
 * Installs an IP4-lookup entry matching multicast (224.0.0.0/4) or
 * broadcast (255.255.255.255) destination addresses, tags the packet
 * with the corresponding L3 cast result-info, and loops back into the
 * IP4 lookup for protocol parsing.  Returns 0 or -EINVAL for an
 * unknown @l3_cast.
 */
static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		/* Multicast: match only the high nibble of the first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
					     MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case  MVPP2_PRS_L3_BROAD_CAST:
		/* Broadcast: all four DIP bytes must be 0xff */
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* Go again to ipv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);

	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Shift back to IPv4 proto */
	mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1023 
/* Set entries for protocols over IPv6
 *
 * Installs one TCAM/SRAM entry in the IP6 lookup stage that matches the
 * next-header byte and finishes parsing by jumping to flow-id generation.
 *
 * @proto:   IP protocol number to match; only TCP, UDP, ICMPv6 and IPIP
 *           (IPv4-in-IPv6, e.g. DS-Lite) are accepted.
 * @ri:      result-info bits to set for matching packets.
 * @ri_mask: mask of the result-info bits being updated.
 *
 * Returns 0 on success, -EINVAL for an unsupported protocol, or a negative
 * value if no free TCAM entry is available.
 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	/* L4 offset relative to the current lookup position; the -6
	 * compensates for the part of the IPv6 header already consumed
	 * at this stage - TODO confirm against the PPv2 datasheet.
	 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 6,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Match the next-header byte, only on the "no extension header"
	 * pass (ai bit set by the preceding IPv6 entries).
	 */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Write HW */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1064 
/* IPv6 L3 multicast entry
 *
 * Installs one TCAM/SRAM entry that flags IPv6 multicast destination
 * addresses (first DIP byte match) and then re-enters the IP6 stage for
 * the next-header pass. Only MVPP2_PRS_L3_MULTI_CAST is supported.
 *
 * Returns 0 on success, -EINVAL for any other cast type, or a negative
 * value if no free TCAM entry is available.
 */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Not finished yet: go again to the IPv6 stage for the
	 * next-header pass (the ai bit below marks this entry as done).
	 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Match the first DIP byte against the IPv6 multicast prefix */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				     MVPP2_PRS_IPV6_MC_MASK);
	/* Only match on the first pass (ai bit still clear) */
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1104 
1105 /* Parser per-port initialization */
1106 static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
1107 				   int lu_max, int offset)
1108 {
1109 	u32 val;
1110 
1111 	/* Set lookup ID */
1112 	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
1113 	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
1114 	val |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
1115 	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
1116 
1117 	/* Set maximum number of loops for packet received from port */
1118 	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
1119 	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
1120 	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
1121 	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
1122 
1123 	/* Set initial offset for packet header extraction for the first
1124 	 * searching loop
1125 	 */
1126 	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
1127 	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
1128 	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
1129 	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
1130 }
1131 
1132 /* Default flow entries initialization for all ports */
1133 static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
1134 {
1135 	struct mvpp2_prs_entry pe;
1136 	int port;
1137 
1138 	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
1139 		memset(&pe, 0, sizeof(pe));
1140 		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1141 		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
1142 
1143 		/* Mask all ports */
1144 		mvpp2_prs_tcam_port_map_set(&pe, 0);
1145 
1146 		/* Set flow ID*/
1147 		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
1148 		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
1149 
1150 		/* Update shadow table and hw entry */
1151 		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
1152 		mvpp2_prs_hw_write(priv, &pe);
1153 	}
1154 }
1155 
/* Set default entry for Marvell Header field
 *
 * Installs two entries in the MH lookup stage:
 *  - the default entry, which skips the Marvell Header (MVPP2_MH_SIZE)
 *    and continues with MAC lookup for all ports;
 *  - a "skip parser" entry, which also skips the Marvell Header but ends
 *    the lookup immediately (flow generation). It matches no port here;
 *    presumably it is enabled per-port elsewhere.
 */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set MH entry that skip parser.
	 * Note: pe is deliberately reused from the entry above (no memset);
	 * the fields that must differ are overridden below.
	 */
	pe.index = MVPP2_PE_MH_SKIP_PRS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, 0);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}
1191 
/* Set default entries (place holder) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Mark the packet for drop and finish lookup via flow generation */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* Create dummy entries for drop all and promiscuous modes.
	 * Port 0 with enable=false acts as a place holder; the real
	 * per-port state is set later via the same helpers.
	 */
	mvpp2_prs_drop_fc(priv);
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
}
1223 
/* Set default entries for various types of dsa packets
 *
 * Creates place-holder entries for every combination of tagged/untagged
 * DSA/EDSA framing (enabled later per port), the ethertype-based DSA
 * entries, and finally a default DSA-stage entry used when no DSA or
 * EDSA tag is found.
 */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* None tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* None tagged EDSA ethertype entry - place holder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - place holder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* NOTE(review): registered in the shadow table under
	 * MVPP2_PRS_LU_MAC rather than MVPP2_PRS_LU_DSA - confirm this is
	 * intentional.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}
1277 
/* Initialize parser entries for VID filtering
 *
 * Installs the two fall-through entries of the VID lookup stage: one for
 * regular frames (skip 4 VLAN-tag bytes) and one for extended-DSA frames
 * (skip 8 bytes), distinguished by MVPP2_PRS_EDSA_VID_AI_BIT. Both
 * continue with the L2 (ethertype) lookup.
 */
static void mvpp2_prs_vid_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Match only non-EDSA frames (EDSA ai bit clear) */
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 4 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vid entry for extended DSA */
	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Match only EDSA frames (EDSA ai bit set) */
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
				 MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}
1333 
/* Match basic ethertypes
 *
 * Installs L2-stage entries for PPPoE, ARP, LBTD, IPv4 (with and without
 * options) and IPv6 ethertypes, plus the default "unknown ethertype"
 * entry. Returns 0 on success or a negative value if the TCAM runs out
 * of free entries.
 */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	/* Skip the PPPoE header and continue in the PPPoE lookup stage */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD (loopback detection frames, steered to the CPU
	 * via the RX-special result-info bits)
	 */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	/* Match the ethertype plus the version/IHL byte right after it */
	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
				 sizeof(struct iphdr) - 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options.
	 * Note: pe is deliberately reused from the entry above (no memset).
	 * The ETH_P_IP match, next-lu, shift and L3 offset are inherited;
	 * only the version/IHL byte match is loosened (IHL ignored) and
	 * the result-info is rewritten below.
	 */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even if it's an unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1551 
1552 /* Configure vlan entries and detect up to 2 successive VLAN tags.
1553  * Possible options:
1554  * 0x8100, 0x88A8
1555  * 0x8100, 0x8100
1556  * 0x8100
1557  * 0x88A8
1558  */
1559 static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
1560 {
1561 	struct mvpp2_prs_entry pe;
1562 	int err;
1563 
1564 	priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
1565 					      MVPP2_PRS_DBL_VLANS_MAX,
1566 					      GFP_KERNEL);
1567 	if (!priv->prs_double_vlans)
1568 		return -ENOMEM;
1569 
1570 	/* Double VLAN: 0x8100, 0x88A8 */
1571 	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
1572 					MVPP2_PRS_PORT_MASK);
1573 	if (err)
1574 		return err;
1575 
1576 	/* Double VLAN: 0x8100, 0x8100 */
1577 	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
1578 					MVPP2_PRS_PORT_MASK);
1579 	if (err)
1580 		return err;
1581 
1582 	/* Single VLAN: 0x88a8 */
1583 	err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
1584 				 MVPP2_PRS_PORT_MASK);
1585 	if (err)
1586 		return err;
1587 
1588 	/* Single VLAN: 0x8100 */
1589 	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
1590 				 MVPP2_PRS_PORT_MASK);
1591 	if (err)
1592 		return err;
1593 
1594 	/* Set default double vlan entry */
1595 	memset(&pe, 0, sizeof(pe));
1596 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1597 	pe.index = MVPP2_PE_VLAN_DBL;
1598 
1599 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
1600 
1601 	/* Clear ai for next iterations */
1602 	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1603 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
1604 				 MVPP2_PRS_RI_VLAN_MASK);
1605 
1606 	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
1607 				 MVPP2_PRS_DBL_VLAN_AI_BIT);
1608 	/* Unmask all ports */
1609 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1610 
1611 	/* Update shadow table and hw entry */
1612 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
1613 	mvpp2_prs_hw_write(priv, &pe);
1614 
1615 	/* Set default vlan none entry */
1616 	memset(&pe, 0, sizeof(pe));
1617 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1618 	pe.index = MVPP2_PE_VLAN_NONE;
1619 
1620 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1621 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1622 				 MVPP2_PRS_RI_VLAN_MASK);
1623 
1624 	/* Unmask all ports */
1625 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1626 
1627 	/* Update shadow table and hw entry */
1628 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
1629 	mvpp2_prs_hw_write(priv, &pe);
1630 
1631 	return 0;
1632 }
1633 
/* Set entries for PPPoE ethertype
 *
 * Installs PPPoE-stage entries for IPv4 (with and without options), IPv6
 * and a default non-IP entry. Returns 0 on success or a negative value if
 * the TCAM runs out of free entries.
 */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
				 sizeof(struct iphdr) - 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options.
	 * Note: pe is deliberately reused from the entry above (no memset);
	 * the PPP_IP match, next-lu, shift and L3 offset are inherited. The
	 * match is narrowed to a version/IHL byte of "no options" and the
	 * result-info is rewritten below.
	 */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Jump to DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE: catch-all with no etype match */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1746 
/* Initialize entries for IPv4
 *
 * Installs the IPv4 protocol entries (TCP, UDP, IGMP), the broadcast and
 * multicast DIP entries, and the two default IP4-stage entries (unknown
 * protocol, unicast address). Returns 0 on success or a negative value
 * from the helpers.
 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	/* IGMP is steered to the CPU via the RX-special result-info bits */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	/* Match only the second (protocol) pass: DIP ai bit set */
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Go again to ipv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);

	/* Flag the next pass so the DIP entries are not matched twice */
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Shift back to IPv4 proto */
	mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	/* Match only the first (DIP) pass: ai bit still clear */
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1835 
1836 /* Initialize entries for IPv6 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	/* ICMPv6 is flagged as a special packet for the CPU */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Mark L3 unknown and set the drop bit for exhausted hop limit */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	/* Match hop limit == 0 (tcam byte 1 under the hop-limit mask) */
	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* NOTE(review): unlike the entries below, no port map is set for
	 * this entry - confirm all ports are meant to be masked here.
	 */

	/* Update shadow table and hw entry */
	/* NOTE(review): the shadow lookup id is MVPP2_PRS_LU_IP4 although
	 * the tcam entry itself is programmed as MVPP2_PRS_LU_IP6 - looks
	 * like a copy-paste; verify against shadow-table consumers before
	 * changing.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relatively to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	/* NOTE(review): MVPP2_PRS_LU_IP4 for an IPv6 entry - see note above */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	/* NOTE(review): MVPP2_PRS_LU_IP4 for an IPv6 entry - see note above */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1970 
1971 /* Find tcam entry with matched pair <vid,port> */
1972 static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
1973 {
1974 	unsigned char byte[2], enable[2];
1975 	struct mvpp2_prs_entry pe;
1976 	u16 rvid, rmask;
1977 	int tid;
1978 
1979 	/* Go through the all entries with MVPP2_PRS_LU_VID */
1980 	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
1981 	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
1982 		if (!port->priv->prs_shadow[tid].valid ||
1983 		    port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
1984 			continue;
1985 
1986 		mvpp2_prs_init_from_hw(port->priv, &pe, tid);
1987 
1988 		mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
1989 		mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
1990 
1991 		rvid = ((byte[0] & 0xf) << 8) + byte[1];
1992 		rmask = ((enable[0] & 0xf) << 8) + enable[1];
1993 
1994 		if (rvid != vid || rmask != mask)
1995 			continue;
1996 
1997 		return tid;
1998 	}
1999 
2000 	return -ENOENT;
2001 }
2002 
2003 /* Write parser entry for VID filtering */
2004 int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
2005 {
2006 	unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
2007 				 port->id * MVPP2_PRS_VLAN_FILT_MAX;
2008 	unsigned int mask = 0xfff, reg_val, shift;
2009 	struct mvpp2 *priv = port->priv;
2010 	struct mvpp2_prs_entry pe;
2011 	int tid;
2012 
2013 	memset(&pe, 0, sizeof(pe));
2014 
2015 	/* Scan TCAM and see if entry with this <vid,port> already exist */
2016 	tid = mvpp2_prs_vid_range_find(port, vid, mask);
2017 
2018 	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
2019 	if (reg_val & MVPP2_DSA_EXTENDED)
2020 		shift = MVPP2_VLAN_TAG_EDSA_LEN;
2021 	else
2022 		shift = MVPP2_VLAN_TAG_LEN;
2023 
2024 	/* No such entry */
2025 	if (tid < 0) {
2026 
2027 		/* Go through all entries from first to last in vlan range */
2028 		tid = mvpp2_prs_tcam_first_free(priv, vid_start,
2029 						vid_start +
2030 						MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
2031 
2032 		/* There isn't room for a new VID filter */
2033 		if (tid < 0)
2034 			return tid;
2035 
2036 		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2037 		pe.index = tid;
2038 
2039 		/* Mask all ports */
2040 		mvpp2_prs_tcam_port_map_set(&pe, 0);
2041 	} else {
2042 		mvpp2_prs_init_from_hw(priv, &pe, tid);
2043 	}
2044 
2045 	/* Enable the current port */
2046 	mvpp2_prs_tcam_port_set(&pe, port->id, true);
2047 
2048 	/* Continue - set next lookup */
2049 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2050 
2051 	/* Skip VLAN header - Set offset to 4 or 8 bytes */
2052 	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2053 
2054 	/* Set match on VID */
2055 	mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);
2056 
2057 	/* Clear all ai bits for next iteration */
2058 	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2059 
2060 	/* Update shadow table */
2061 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2062 	mvpp2_prs_hw_write(priv, &pe);
2063 
2064 	return 0;
2065 }
2066 
/* Remove parser entry for VID filtering */
2068 void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
2069 {
2070 	struct mvpp2 *priv = port->priv;
2071 	int tid;
2072 
2073 	/* Scan TCAM and see if entry with this <vid,port> already exist */
2074 	tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
2075 
2076 	/* No such entry */
2077 	if (tid < 0)
2078 		return;
2079 
2080 	mvpp2_prs_hw_inv(priv, tid);
2081 	priv->prs_shadow[tid].valid = false;
2082 }
2083 
2084 /* Remove all existing VID filters on this port */
2085 void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
2086 {
2087 	struct mvpp2 *priv = port->priv;
2088 	int tid;
2089 
2090 	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
2091 	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
2092 		if (priv->prs_shadow[tid].valid) {
2093 			mvpp2_prs_hw_inv(priv, tid);
2094 			priv->prs_shadow[tid].valid = false;
2095 		}
2096 	}
2097 }
2098 
/* Remove VID filtering entry for this port */
2100 void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
2101 {
2102 	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
2103 	struct mvpp2 *priv = port->priv;
2104 
2105 	/* Invalidate the guard entry */
2106 	mvpp2_prs_hw_inv(priv, tid);
2107 
2108 	priv->prs_shadow[tid].valid = false;
2109 }
2110 
2111 /* Add guard entry that drops packets when no VID is matched on this port */
2112 void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
2113 {
2114 	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
2115 	struct mvpp2 *priv = port->priv;
2116 	unsigned int reg_val, shift;
2117 	struct mvpp2_prs_entry pe;
2118 
2119 	if (priv->prs_shadow[tid].valid)
2120 		return;
2121 
2122 	memset(&pe, 0, sizeof(pe));
2123 
2124 	pe.index = tid;
2125 
2126 	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
2127 	if (reg_val & MVPP2_DSA_EXTENDED)
2128 		shift = MVPP2_VLAN_TAG_EDSA_LEN;
2129 	else
2130 		shift = MVPP2_VLAN_TAG_LEN;
2131 
2132 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2133 
2134 	/* Mask all ports */
2135 	mvpp2_prs_tcam_port_map_set(&pe, 0);
2136 
2137 	/* Update port mask */
2138 	mvpp2_prs_tcam_port_set(&pe, port->id, true);
2139 
2140 	/* Continue - set next lookup */
2141 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2142 
2143 	/* Skip VLAN header - Set offset to 4 or 8 bytes */
2144 	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2145 
2146 	/* Drop VLAN packets that don't belong to any VIDs on this port */
2147 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2148 				 MVPP2_PRS_RI_DROP_MASK);
2149 
2150 	/* Clear all ai bits for next iteration */
2151 	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2152 
2153 	/* Update shadow table */
2154 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2155 	mvpp2_prs_hw_write(priv, &pe);
2156 }
2157 
2158 /* Parser default initialization */
int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries (indirect per-index access) */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	/* Shadow copy of tcam state; device-managed, freed with the device */
	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(*priv->prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	/* Install the per-stage default entries; the helpers below that
	 * return an error code are checked, the others return no status.
	 */
	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	mvpp2_prs_vid_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}
2224 
2225 /* Compare MAC DA with tcam entry data */
2226 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2227 				       const u8 *da, unsigned char *mask)
2228 {
2229 	unsigned char tcam_byte, tcam_mask;
2230 	int index;
2231 
2232 	for (index = 0; index < ETH_ALEN; index++) {
2233 		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2234 		if (tcam_mask != mask[index])
2235 			return false;
2236 
2237 		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2238 			return false;
2239 	}
2240 
2241 	return true;
2242 }
2243 
2244 /* Find tcam entry with matched pair <MAC DA, port> */
2245 static int
2246 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2247 			    unsigned char *mask, int udf_type)
2248 {
2249 	struct mvpp2_prs_entry pe;
2250 	int tid;
2251 
2252 	/* Go through the all entires with MVPP2_PRS_LU_MAC */
2253 	for (tid = MVPP2_PE_MAC_RANGE_START;
2254 	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
2255 		unsigned int entry_pmap;
2256 
2257 		if (!priv->prs_shadow[tid].valid ||
2258 		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2259 		    (priv->prs_shadow[tid].udf != udf_type))
2260 			continue;
2261 
2262 		mvpp2_prs_init_from_hw(priv, &pe, tid);
2263 		entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
2264 
2265 		if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
2266 		    entry_pmap == pmap)
2267 			return tid;
2268 	}
2269 
2270 	return -ENOENT;
2271 }
2272 
2273 /* Update parser's mac da entry */
int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
{
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct mvpp2 *priv = port->priv;
	unsigned int pmap, len, ri;
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Scan TCAM and see if entry with this <MAC DA, port> already exist */
	tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
					  MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (tid < 0) {
		/* Removing an entry that does not exist is a no-op */
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Go through the all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv,
						MVPP2_PE_MAC_RANGE_START,
						MVPP2_PE_MAC_RANGE_END);
		if (tid < 0)
			return tid;

		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(&pe);
	if (pmap == 0) {
		/* An "add" that leaves the port map empty is inconsistent */
		if (add)
			return -EINVAL;

		mvpp2_prs_hw_inv(priv, pe.index);
		priv->prs_shadow[pe.index].valid = false;
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_BCAST;
	} else if (is_multicast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_MCAST;
	} else {
		ri = MVPP2_PRS_RI_L2_UCAST;

		/* Flag frames addressed to this interface itself */
		if (ether_addr_equal(da, port->dev->dev_addr))
			ri |= MVPP2_PRS_RI_MAC_ME_MASK;
	}

	/* Mirror the result-info bits into sram and the shadow table */
	mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
2361 
2362 int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
2363 {
2364 	struct mvpp2_port *port = netdev_priv(dev);
2365 	int err;
2366 
2367 	/* Remove old parser entry */
2368 	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
2369 	if (err)
2370 		return err;
2371 
2372 	/* Add new parser entry */
2373 	err = mvpp2_prs_mac_da_accept(port, da, true);
2374 	if (err)
2375 		return err;
2376 
2377 	/* Set addr in the device */
2378 	ether_addr_copy(dev->dev_addr, da);
2379 
2380 	return 0;
2381 }
2382 
/* Remove every MAC entry enabled for this port, except the broadcast
 * address and the port's own address.
 */
void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	unsigned long pmap;
	int index, tid;

	for (tid = MVPP2_PE_MAC_RANGE_START;
	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		/* Only consider valid default-UDF MAC entries */
		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		pmap = mvpp2_prs_tcam_port_map_get(&pe);

		/* We only want entries active on this port */
		if (!test_bit(port->id, &pmap))
			continue;

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		/* Special cases : Don't remove broadcast and port's own
		 * address
		 */
		if (is_broadcast_ether_addr(da) ||
		    ether_addr_equal(da, port->dev->dev_addr))
			continue;

		/* Remove entry from TCAM */
		mvpp2_prs_mac_da_accept(port, da, false);
	}
}
2423 
2424 int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
2425 {
2426 	switch (type) {
2427 	case MVPP2_TAG_TYPE_EDSA:
2428 		/* Add port to EDSA entries */
2429 		mvpp2_prs_dsa_tag_set(priv, port, true,
2430 				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2431 		mvpp2_prs_dsa_tag_set(priv, port, true,
2432 				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2433 		/* Remove port from DSA entries */
2434 		mvpp2_prs_dsa_tag_set(priv, port, false,
2435 				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2436 		mvpp2_prs_dsa_tag_set(priv, port, false,
2437 				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2438 		break;
2439 
2440 	case MVPP2_TAG_TYPE_DSA:
2441 		/* Add port to DSA entries */
2442 		mvpp2_prs_dsa_tag_set(priv, port, true,
2443 				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2444 		mvpp2_prs_dsa_tag_set(priv, port, true,
2445 				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2446 		/* Remove port from EDSA entries */
2447 		mvpp2_prs_dsa_tag_set(priv, port, false,
2448 				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2449 		mvpp2_prs_dsa_tag_set(priv, port, false,
2450 				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2451 		break;
2452 
2453 	case MVPP2_TAG_TYPE_MH:
2454 	case MVPP2_TAG_TYPE_NONE:
2455 		/* Remove port form EDSA and DSA entries */
2456 		mvpp2_prs_dsa_tag_set(priv, port, false,
2457 				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2458 		mvpp2_prs_dsa_tag_set(priv, port, false,
2459 				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2460 		mvpp2_prs_dsa_tag_set(priv, port, false,
2461 				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2462 		mvpp2_prs_dsa_tag_set(priv, port, false,
2463 				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2464 		break;
2465 
2466 	default:
2467 		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
2468 			return -EINVAL;
2469 	}
2470 
2471 	return 0;
2472 }
2473 
2474 int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
2475 {
2476 	struct mvpp2_prs_entry pe;
2477 	u8 *ri_byte, *ri_byte_mask;
2478 	int tid, i;
2479 
2480 	memset(&pe, 0, sizeof(pe));
2481 
2482 	tid = mvpp2_prs_tcam_first_free(priv,
2483 					MVPP2_PE_LAST_FREE_TID,
2484 					MVPP2_PE_FIRST_FREE_TID);
2485 	if (tid < 0)
2486 		return tid;
2487 
2488 	pe.index = tid;
2489 
2490 	ri_byte = (u8 *)&ri;
2491 	ri_byte_mask = (u8 *)&ri_mask;
2492 
2493 	mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK);
2494 	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2495 
2496 	for (i = 0; i < 4; i++) {
2497 		mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i],
2498 					     ri_byte_mask[i]);
2499 	}
2500 
2501 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2502 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2503 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2504 	mvpp2_prs_hw_write(priv, &pe);
2505 
2506 	return 0;
2507 }
2508 
2509 /* Set prs flow for the port */
2510 int mvpp2_prs_def_flow(struct mvpp2_port *port)
2511 {
2512 	struct mvpp2_prs_entry pe;
2513 	int tid;
2514 
2515 	memset(&pe, 0, sizeof(pe));
2516 
2517 	tid = mvpp2_prs_flow_find(port->priv, port->id);
2518 
2519 	/* Such entry not exist */
2520 	if (tid < 0) {
2521 		/* Go through the all entires from last to first */
2522 		tid = mvpp2_prs_tcam_first_free(port->priv,
2523 						MVPP2_PE_LAST_FREE_TID,
2524 					       MVPP2_PE_FIRST_FREE_TID);
2525 		if (tid < 0)
2526 			return tid;
2527 
2528 		pe.index = tid;
2529 
2530 		/* Set flow ID*/
2531 		mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
2532 		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2533 
2534 		/* Update shadow table */
2535 		mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
2536 	} else {
2537 		mvpp2_prs_init_from_hw(port->priv, &pe, tid);
2538 	}
2539 
2540 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2541 	mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
2542 	mvpp2_prs_hw_write(port->priv, &pe);
2543 
2544 	return 0;
2545 }
2546 
2547 int mvpp2_prs_hits(struct mvpp2 *priv, int index)
2548 {
2549 	u32 val;
2550 
2551 	if (index > MVPP2_PRS_TCAM_SRAM_SIZE)
2552 		return -EINVAL;
2553 
2554 	mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
2555 
2556 	val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
2557 
2558 	val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
2559 
2560 	return val;
2561 }
2562