xref: /openbmc/linux/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c (revision b694e3c604e999343258c49e574abd7be012e726)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Header Parser helpers for Marvell PPv2 Network Controller
4  *
5  * Copyright (C) 2014 Marvell
6  *
7  * Marcin Wojtas <mw@semihalf.com>
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/netdevice.h>
12 #include <linux/etherdevice.h>
13 #include <linux/platform_device.h>
14 #include <uapi/linux/ppp_defs.h>
15 #include <net/ip.h>
16 #include <net/ipv6.h>
17 
18 #include "mvpp2.h"
19 #include "mvpp2_prs.h"
20 
21 /* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	/* The SRAM/TCAM index registers below are shared indirect-access
	 * state, so the caller must hold the parser lock.
	 */
	lockdep_assert_held(&priv->prs_spinlock);

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit so the written entry becomes active */
	pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);

	return 0;
}
46 
47 /* Initialize tcam entry from hw */
static int __mvpp2_prs_init_from_hw(struct mvpp2 *priv,
				    struct mvpp2_prs_entry *pe, int tid)
{
	int i;

	/* Uses the shared indirect-access index registers: parser lock
	 * must be held by the caller.
	 */
	lockdep_assert_held(&priv->prs_spinlock);

	if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	memset(pe, 0, sizeof(*pe));
	pe->index = tid;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	/* Read the invalidation word first: an invalidated entry carries
	 * no meaningful data, so skip the full read-back.
	 */
	pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
79 
mvpp2_prs_init_from_hw(struct mvpp2 * priv,struct mvpp2_prs_entry * pe,int tid)80 int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
81 			   int tid)
82 {
83 	int err;
84 
85 	spin_lock_bh(&priv->prs_spinlock);
86 	err = __mvpp2_prs_init_from_hw(priv, pe, tid);
87 	spin_unlock_bh(&priv->prs_spinlock);
88 
89 	return err;
90 }
91 
92 /* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	/* Setting the invalidation bit disables the tcam entry in hw */
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}
100 
101 /* Enable shadow table entry and set its lookup ID */
mvpp2_prs_shadow_set(struct mvpp2 * priv,int index,int lu)102 static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
103 {
104 	priv->prs_shadow[index].valid = true;
105 	priv->prs_shadow[index].lu = lu;
106 }
107 
108 /* Update ri fields in shadow table entry */
mvpp2_prs_shadow_ri_set(struct mvpp2 * priv,int index,unsigned int ri,unsigned int ri_mask)109 static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
110 				    unsigned int ri, unsigned int ri_mask)
111 {
112 	priv->prs_shadow[index].ri_mask = ri_mask;
113 	priv->prs_shadow[index].ri = ri;
114 }
115 
116 /* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	/* Clear the current LU value and its enable bits, then program the
	 * new LU and enable the whole LU field for matching.
	 */
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK);
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK);
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
}
124 
125 /* Update mask for single port in tcam sw entry */
/* Add or remove a single port from the tcam entry's port match.
 * The port-enable bits are active-low: a cleared bit matches the port.
 */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	u32 port_en_bit = MVPP2_PRS_TCAM_PORT_EN(BIT(port));

	if (add)
		pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~port_en_bit;
	else
		pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= port_en_bit;
}
134 
135 /* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	/* Clear the port data and enable fields, then program the enable
	 * bits. The enable field is active-low, hence the inverted map:
	 * ports present in 'ports' get their enable bit cleared (matched).
	 */
	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK);
	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK);
	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK);
}
143 
144 /* Obtain port map from tcam sw entry */
mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry * pe)145 unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
146 {
147 	return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK;
148 }
149 
150 /* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	/* Bit position of the byte inside its tcam word */
	int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;

	/* Clear the old data byte and its enable bits, then set the new
	 * values. Enable bits live in the upper half of the word; note the
	 * differing macro/shift nesting between the clear and set of the
	 * enable field — both place the bits at the same final position.
	 */
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos);
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos);
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos;
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos);
}
162 
163 /* Get byte of data and its enable bits from tcam sw entry */
void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
				  unsigned int offs, unsigned char *byte,
				  unsigned char *enable)
{
	/* Bit position of the byte inside its tcam word */
	int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;

	*byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff;
	/* Enable bits sit 16 bits above the data bits in the same word */
	*enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff;
}
173 
174 /* Compare tcam data bytes with a pattern */
mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry * pe,int offs,u16 data)175 static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
176 				    u16 data)
177 {
178 	u16 tcam_data;
179 
180 	tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff;
181 	return tcam_data == data;
182 }
183 
184 /* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i;

	/* Only touch AI bits selected by 'enable'; others keep their value */
	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i);
		else
			pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i);
	}

	/* Mark the updated AI bits as participating in the tcam match */
	pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable);
}
202 
203 /* Get ai bits from tcam sw entry */
mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry * pe)204 static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
205 {
206 	return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
207 }
208 
209 /* Set ethertype in tcam sw entry */
/* Match an ethertype at 'offset': both bytes, network (big-endian) order */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset, (ethertype >> 8) & 0xff, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
216 
217 /* Set vid in tcam sw entry */
/* Match a 12-bit VID at 'offset': high nibble first, then the low byte */
static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
				unsigned short vid)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset, (vid >> 8) & 0xf, 0xf);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
}
224 
225 /* Set bits in sram sw entry */
/* OR 'val' into the sram sw entry, starting at absolute bit 'bit_num' */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    u32 val)
{
	int word = MVPP2_BIT_TO_WORD(bit_num);
	int shift = MVPP2_BIT_IN_WORD(bit_num);

	pe->sram[word] |= val << shift;
}
231 
232 /* Clear bits in sram sw entry */
/* Clear the bits of 'val' in the sram sw entry at absolute bit 'bit_num' */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      u32 val)
{
	int word = MVPP2_BIT_TO_WORD(bit_num);
	int shift = MVPP2_BIT_IN_WORD(bit_num);

	pe->sram[word] &= ~(val << shift);
}
238 
239 /* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	/* For every RI bit selected by 'mask': program its value and set
	 * the matching control bit so hw applies the update.
	 */
	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
						1);
		else
			mvpp2_prs_sram_bits_clear(pe,
						  MVPP2_PRS_SRAM_RI_OFFS + i,
						  1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}
260 
261 /* Obtain ri bits from sram sw entry */
mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry * pe)262 static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
263 {
264 	return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
265 }
266 
267 /* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	/* For every AI bit selected by 'mask': program its value and set
	 * the matching control bit so hw applies the update.
	 */
	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
						1);
		else
			mvpp2_prs_sram_bits_clear(pe,
						  MVPP2_PRS_SRAM_AI_OFFS + i,
						  1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}
288 
289 /* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	/* ai is stored on bits 90->97; so it spreads across two u32 */
	int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);

	/* Stitch the low part from word ai_off with the high part spilled
	 * into the next word. ai_shift is a non-zero constant here, so the
	 * (32 - ai_shift) shift is well defined.
	 */
	bits = (pe->sram[ai_off] >> ai_shift) |
	       (pe->sram[ai_off + 1] << (32 - ai_shift));

	return bits;
}
302 
303 /* In sram sw entry set lookup ID field of the tcam key to be used in the next
304  * lookup interation
305  */
mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry * pe,unsigned int lu)306 static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
307 				       unsigned int lu)
308 {
309 	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
310 
311 	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
312 				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
313 	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
314 }
315 
316 /* In the sram sw entry set sign and value of the next lookup offset
317  * and the offset value generated to the classifier
318  */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value. NOTE(review): the old shift-value bits are OR-ed, not
	 * cleared first — callers appear to build entries from a zeroed sw
	 * entry, so this relies on the field starting at 0.
	 */
	pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
		shift & MVPP2_PRS_SRAM_SHIFT_MASK;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
342 
343 /* In the sram sw entry set sign and value of the user defined offset
344  * generated to the classifier
345  */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value (field is cleared before set, unlike the shift field) */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				offset & MVPP2_PRS_SRAM_UDF_MASK);

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
378 
379 /* Find parser flow entry */
static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all entries with MVPP2_PRS_LU_FLOWS.
	 * NOTE(review): caller must hold prs_spinlock — asserted inside
	 * __mvpp2_prs_init_from_hw().
	 */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		__mvpp2_prs_init_from_hw(priv, &pe, tid);
		bits = mvpp2_prs_sram_ai_get(&pe);

		/* Sram store classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return tid;
	}

	return -ENOENT;
}
403 
404 /* Return first free tcam index, seeking from start to end */
mvpp2_prs_tcam_first_free(struct mvpp2 * priv,unsigned char start,unsigned char end)405 static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
406 				     unsigned char end)
407 {
408 	int tid;
409 
410 	if (start > end)
411 		swap(start, end);
412 
413 	for (tid = start; tid <= end; tid++) {
414 		if (!priv->prs_shadow[tid].valid)
415 			return tid;
416 	}
417 
418 	return -EINVAL;
419 }
420 
421 /* Drop flow control pause frames */
static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
{
	/* 01:80:C2:00:00:01 is the reserved MAC control (PAUSE) DA */
	unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
	struct mvpp2_prs_entry pe;
	unsigned int len;

	memset(&pe, 0, sizeof(pe));

	/* For all ports - drop flow control frames */
	pe.index = MVPP2_PE_FC_DROP;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);

	/* Finished with this entry: generate flow id next */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);
}
452 
453 /* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* NOTE(review): caller must hold prs_spinlock — asserted inside
	 * __mvpp2_prs_init_from_hw() and mvpp2_prs_hw_write().
	 */
	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exist - update port only */
		__mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		/* Finished with this entry: generate flow id next */
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
486 
487 /* Set port to unicast or multicast promiscuous mode */
static void __mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
					enum mvpp2_prs_l2_cast l2_cast,
					bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char cast_match;
	unsigned int ri;
	int tid;

	lockdep_assert_held(&priv->prs_spinlock);

	/* Pick the fixed tcam slot, match pattern and result-info bits for
	 * the requested cast type.
	 */
	if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
		cast_match = MVPP2_PRS_UCAST_VAL;
		tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_UCAST;
	} else {
		cast_match = MVPP2_PRS_MCAST_VAL;
		tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_MCAST;
	}

	/* promiscuous mode - Accept unknown unicast or multicast packets */
	if (priv->prs_shadow[tid].valid) {
		__mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = tid;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);

		/* Match UC or MC addresses */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
					     MVPP2_PRS_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
543 
/* Locked wrapper around __mvpp2_prs_mac_promisc_set() */
void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
			       enum mvpp2_prs_l2_cast l2_cast, bool add)
{
	spin_lock_bh(&priv->prs_spinlock);
	__mvpp2_prs_mac_promisc_set(priv, port, l2_cast, add);
	spin_unlock_bh(&priv->prs_spinlock);
}
551 
552 /* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	/* NOTE(review): caller must hold prs_spinlock — asserted inside
	 * __mvpp2_prs_init_from_hw() and mvpp2_prs_hw_write().
	 */
	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;	/* EDSA tag is 8 bytes */
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;	/* DSA tag is 4 bytes */
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exist - update port only */
		__mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
					     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
					     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);

			/* Set ai bits for next iteration */
			if (extend)
				mvpp2_prs_sram_ai_update(&pe, 1,
							MVPP2_PRS_SRAM_AI_MASK);
			else
				mvpp2_prs_sram_ai_update(&pe, 0,
							MVPP2_PRS_SRAM_AI_MASK);

			/* Set result info bits to 'single vlan' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
			/* If packet is tagged continue check vid filtering */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
		} else {
			/* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/
			mvpp2_prs_sram_shift_set(&pe, shift,
					MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
618 
619 /* Set entry for dsa ethertype */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	/* NOTE(review): caller must hold prs_spinlock — asserted inside
	 * __mvpp2_prs_init_from_hw() and mvpp2_prs_hw_write().
	 */
	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exist - update port only */
		__mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype (EDSA ethertype, followed by 2 zero bytes) */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 byte reserved + tag*/
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						     MVPP2_ETH_TYPE_LEN + 2 + 3,
						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
686 
687 /* Search for existing single/triple vlan entry */
static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all entries with MVPP2_PRS_LU_VLAN.
	 * NOTE(review): caller must hold prs_spinlock — asserted inside
	 * __mvpp2_prs_init_from_hw().
	 */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		__mvpp2_prs_init_from_hw(priv, &pe, tid);
		match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(&pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(&pe);
		/* Clear double vlan bit */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		/* Only single/triple vlan entries qualify here */
		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return tid;
	}

	return -ENOENT;
}
727 
728 /* Add/update single/triple vlan entry */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry pe;
	int tid_aux, tid;
	int ret = 0;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_vlan_find(priv, tpid, ai);

	if (tid < 0) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Get last double vlan tid: single/triple vlan entries must
		 * be placed after every double vlan entry in the tcam.
		 */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			__mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		/* Reject the slot if it does not come after the last double
		 * vlan entry (tcam priority is by index).
		 */
		if (tid <= tid_aux)
			return -EINVAL;

		memset(&pe, 0, sizeof(pe));
		pe.index = tid;
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);

		mvpp2_prs_match_etype(&pe, 0, tpid);

		/* VLAN tag detected, proceed with VID filtering */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			/* Inner tag of a triple-tagged frame: mark with the
			 * double-vlan ai bit so it only matches after a
			 * double vlan entry.
			 */
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		__mvpp2_prs_init_from_hw(priv, &pe, tid);
	}
	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);

	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}
799 
800 /* Get first free double vlan ai number */
mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 * priv)801 static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
802 {
803 	int i;
804 
805 	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
806 		if (!priv->prs_double_vlans[i])
807 			return i;
808 	}
809 
810 	return -EINVAL;
811 }
812 
813 /* Search for existing double vlan entry */
static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
				      unsigned short tpid2)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all entries with MVPP2_PRS_LU_VLAN.
	 * NOTE(review): caller must hold prs_spinlock — asserted inside
	 * __mvpp2_prs_init_from_hw().
	 */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		__mvpp2_prs_init_from_hw(priv, &pe, tid);

		/* Outer tpid at offset 0, inner tpid 4 bytes later */
		match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
			mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);

		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return tid;
	}

	return -ENOENT;
}
845 
846 /* Add or update double vlan entry */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	int tid_aux, tid, ai, ret = 0;
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (tid < 0) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
				MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0)
			return ai;

		/* Get first single/triple vlan tid: double vlan entries must
		 * be placed before every single/triple vlan entry.
		 */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			__mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		/* Reject the slot if it does not come before the first
		 * single/triple vlan entry (tcam priority is by index).
		 */
		if (tid >= tid_aux)
			return -ERANGE;

		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		pe.index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(&pe, 0, tpid1);
		mvpp2_prs_match_etype(&pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		/* Shift 4 bytes - skip outer vlan tag */
		mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		/* Tag the next iteration with this entry's ai value */
		mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		__mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);
	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}
919 
920 /* IPv4 header parsing for fragmentation and L4 offset */
mvpp2_prs_ip4_proto(struct mvpp2 * priv,unsigned short proto,unsigned int ri,unsigned int ri_mask)921 static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
922 			       unsigned int ri, unsigned int ri_mask)
923 {
924 	struct mvpp2_prs_entry pe;
925 	int tid;
926 
927 	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
928 	    (proto != IPPROTO_IGMP))
929 		return -EINVAL;
930 
931 	/* Not fragmented packet */
932 	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
933 					MVPP2_PE_LAST_FREE_TID);
934 	if (tid < 0)
935 		return tid;
936 
937 	memset(&pe, 0, sizeof(pe));
938 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
939 	pe.index = tid;
940 
941 	/* Finished: go to flowid generation */
942 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
943 	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
944 
945 	/* Set L3 offset */
946 	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
947 				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
948 	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
949 	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
950 
951 	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
952 				     MVPP2_PRS_TCAM_PROTO_MASK_L);
953 	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
954 				     MVPP2_PRS_TCAM_PROTO_MASK);
955 
956 	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
957 	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
958 				 MVPP2_PRS_IPV4_DIP_AI_BIT);
959 	/* Unmask all ports */
960 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
961 
962 	/* Update shadow table and hw entry */
963 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
964 	mvpp2_prs_hw_write(priv, &pe);
965 
966 	/* Fragmented packet */
967 	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
968 					MVPP2_PE_LAST_FREE_TID);
969 	if (tid < 0)
970 		return tid;
971 
972 	pe.index = tid;
973 	/* Clear ri before updating */
974 	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
975 	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
976 	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
977 
978 	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
979 				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
980 
981 	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
982 	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
983 
984 	/* Update shadow table and hw entry */
985 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
986 	mvpp2_prs_hw_write(priv, &pe);
987 
988 	return 0;
989 }
990 
/* IPv4 L3 multicast or broadcast entry.
 *
 * Matches the destination-address class in the TCAM (multicast prefix
 * via MVPP2_PRS_IPV4_MC, or all-ones broadcast), sets the L3 cast
 * result bits, then loops back to the IPv4 LU for protocol parsing.
 * Returns 0 on success, -EINVAL for an unknown @l3_cast, or a negative
 * errno if no TCAM entry is free.
 */
static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		/* Match the multicast prefix in the first matched byte
		 * (presumably the first DIP byte, given the shift back
		 * below — TODO confirm against the PPv2 offset scheme)
		 */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
					     MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case  MVPP2_PRS_L3_BROAD_CAST:
		/* Broadcast: all four matched bytes must be 0xff */
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* Go again to ipv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);

	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Shift back to IPv4 proto */
	mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Only hit on the first IPv4 pass (DIP ai bit not yet set) */
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1046 
/* Set entries for protocols over IPv6.
 *
 * Installs a TCAM entry matching @proto in the byte at offset 0 of the
 * current lookup window (TCP, UDP, ICMPv6 or IPIP only), reports @ri
 * and records the L4 offset relative to the current position.
 * Returns 0 on success, -EINVAL for an unsupported protocol, or a
 * negative errno if no free TCAM entry is available.
 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	/* L4 offset: fixed IPv6 header length minus 6 — presumably
	 * compensating for the earlier shift past the DIP; TODO confirm
	 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 6,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Match the protocol (next-header) byte */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Write HW */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1087 
/* IPv6 L3 multicast entry.
 *
 * Matches the MVPP2_PRS_IPV6_MC prefix in the first matched byte,
 * reports L3 multicast and loops back to the IPv6 LU so the next
 * header can still be parsed. Only MVPP2_PRS_L3_MULTI_CAST is
 * supported; anything else returns -EINVAL.
 */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	/* Mark "no extension header" state for the next IPv6 pass */
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				     MVPP2_PRS_IPV6_MC_MASK);
	/* Only hit before the no-ext ai bit is set */
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1127 
1128 /* Parser per-port initialization */
mvpp2_prs_hw_port_init(struct mvpp2 * priv,int port,int lu_first,int lu_max,int offset)1129 static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
1130 				   int lu_max, int offset)
1131 {
1132 	u32 val;
1133 
1134 	/* Set lookup ID */
1135 	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
1136 	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
1137 	val |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
1138 	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
1139 
1140 	/* Set maximum number of loops for packet received from port */
1141 	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
1142 	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
1143 	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
1144 	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
1145 
1146 	/* Set initial offset for packet header extraction for the first
1147 	 * searching loop
1148 	 */
1149 	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
1150 	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
1151 	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
1152 	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
1153 }
1154 
1155 /* Default flow entries initialization for all ports */
mvpp2_prs_def_flow_init(struct mvpp2 * priv)1156 static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
1157 {
1158 	struct mvpp2_prs_entry pe;
1159 	int port;
1160 
1161 	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
1162 		memset(&pe, 0, sizeof(pe));
1163 		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1164 		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
1165 
1166 		/* Mask all ports */
1167 		mvpp2_prs_tcam_port_map_set(&pe, 0);
1168 
1169 		/* Set flow ID*/
1170 		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
1171 		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
1172 
1173 		/* Update shadow table and hw entry */
1174 		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
1175 		mvpp2_prs_hw_write(priv, &pe);
1176 	}
1177 }
1178 
/* Set default entry for Marvell Header field.
 *
 * Programs two MH-stage entries: the default one (all ports) that
 * skips the 2-byte Marvell header and continues to MAC lookup, and a
 * "skip parser" placeholder (masked for all ports) that skips the MH
 * and jumps straight to flow-ID generation.
 */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set MH entry that skip parser.
	 * Deliberately reuses pe without a memset: every field that
	 * differs (index, gen bit, next LU, port map) is re-programmed
	 * below; the MH shift is re-set to the same value.
	 */
	pe.index = MVPP2_PE_MH_SKIP_PRS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, 0);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}
1214 
/* Set default entries (place holders) for promiscuous, non-promiscuous
 * and multicast MAC addresses.
 *
 * The non-promiscuous default drops any packet that did not match an
 * earlier MAC entry; the drop-all and promiscuous placeholders are then
 * created disabled (port 0, false) so later per-port calls can just
 * update their port maps.
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* Create dummy entries for drop all and promiscuous modes */
	mvpp2_prs_drop_fc(priv);
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	__mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
	__mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
}
1246 
/* Set default entries for various types of dsa packets.
 *
 * Creates placeholder entries (disabled: port 0, false) for every
 * tagged/untagged x DSA/EDSA combination, enables the DSA-ethertype
 * variants, and finally installs a fall-through entry used when no
 * DSA/EDSA tag matches.
 */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* None tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* None tagged EDSA ethertype entry - place holder*/
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - place holder*/
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* NOTE(review): shadow records LU_MAC while the TCAM entry is
	 * LU_DSA — matches upstream, but looks inconsistent; confirm.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}
1300 
1301 /* Initialize parser entries for VID filtering */
mvpp2_prs_vid_init(struct mvpp2 * priv)1302 static void mvpp2_prs_vid_init(struct mvpp2 *priv)
1303 {
1304 	struct mvpp2_prs_entry pe;
1305 
1306 	memset(&pe, 0, sizeof(pe));
1307 
1308 	/* Set default vid entry */
1309 	pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
1310 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
1311 
1312 	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);
1313 
1314 	/* Skip VLAN header - Set offset to 4 bytes */
1315 	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
1316 				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1317 
1318 	/* Clear all ai bits for next iteration */
1319 	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1320 
1321 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1322 
1323 	/* Unmask all ports */
1324 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1325 
1326 	/* Update shadow table and hw entry */
1327 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
1328 	mvpp2_prs_hw_write(priv, &pe);
1329 
1330 	/* Set default vid entry for extended DSA*/
1331 	memset(&pe, 0, sizeof(pe));
1332 
1333 	/* Set default vid entry */
1334 	pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
1335 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
1336 
1337 	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
1338 				 MVPP2_PRS_EDSA_VID_AI_BIT);
1339 
1340 	/* Skip VLAN header - Set offset to 8 bytes */
1341 	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
1342 				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1343 
1344 	/* Clear all ai bits for next iteration */
1345 	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1346 
1347 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1348 
1349 	/* Unmask all ports */
1350 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1351 
1352 	/* Update shadow table and hw entry */
1353 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
1354 	mvpp2_prs_hw_write(priv, &pe);
1355 }
1356 
/* Match basic ethertypes.
 *
 * Installs the L2-stage ethertype entries: PPPoE session, ARP, LBTD
 * loopback-test frames, IPv4 (one entry per legal IHL value so the L4
 * offset can be encoded per header length), IPv6, and the catch-all
 * "unknown ethertype" default. Returns 0 on success or a negative
 * errno if the TCAM runs out of free entries.
 */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, ihl;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	/* Skip the PPPoE header and continue at the PPPoE LU stage */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD (loopback test frames, routed to the CPU) */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with header length >= 5.
	 * One entry per IHL so the per-length L4 offset can be encoded.
	 */
	for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
		pe.index = tid;

		mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
		/* First IP header byte: version nibble + this IHL */
		mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
					     MVPP2_PRS_IPV4_HEAD | ihl,
					     MVPP2_PRS_IPV4_HEAD_MASK |
					     MVPP2_PRS_IPV4_IHL_MASK);

		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
					 MVPP2_PRS_RI_L3_PROTO_MASK);
		/* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
		mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
					 sizeof(struct iphdr) - 4,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		/* Set L4 offset */
		mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
					  MVPP2_ETH_TYPE_LEN + (ihl * 4),
					  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
		priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
		priv->prs_shadow[pe.index].finish = false;
		mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
					MVPP2_PRS_RI_L3_PROTO_MASK);
		mvpp2_prs_hw_write(priv, &pe);
	}

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1550 
/* Configure vlan entries and detect up to 2 successive VLAN tags.
 * Possible options:
 * 0x8100, 0x88A8
 * 0x8100, 0x8100
 * 0x8100
 * 0x88A8
 *
 * Also installs the VLAN-stage defaults: the double-vlan default that
 * fires on the second tag (DBL_VLAN ai bit set) and the "no vlan"
 * default. Returns 0 on success or a negative errno from the add
 * helpers.
 */
static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* NOTE(review): @pdev is unused in this revision; the parameter
	 * is kept so existing callers remain unchanged.
	 */

	/* Double VLAN: 0x88A8, 0x8100 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Double VLAN: 0x8100, 0x8100 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x88a8 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x8100 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Set default double vlan entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Clear ai for next iterations */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
				 MVPP2_PRS_RI_VLAN_MASK);

	/* Only hit once a double-vlan entry has set this ai bit */
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
				 MVPP2_PRS_DBL_VLAN_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vlan none entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
				 MVPP2_PRS_RI_VLAN_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1626 
/* Set entries for PPPoE ethertype.
 *
 * Programs the PPPoE LU stage: IPv4-over-PPPoE (one entry per legal
 * IHL value), IPv6-over-PPPoE, and a catch-all for any other PPP
 * protocol that reports an unknown L3 and ends the lookup. Returns 0
 * on success or a negative errno if the TCAM runs out of free entries.
 */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, ihl;

	/* IPv4 over PPPoE with header length >= 5 */
	for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
		pe.index = tid;

		mvpp2_prs_match_etype(&pe, 0, PPP_IP);
		/* First IP header byte: version nibble + this IHL */
		mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
					     MVPP2_PRS_IPV4_HEAD | ihl,
					     MVPP2_PRS_IPV4_HEAD_MASK |
					     MVPP2_PRS_IPV4_IHL_MASK);

		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
					 MVPP2_PRS_RI_L3_PROTO_MASK);
		/* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
		mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
					 sizeof(struct iphdr) - 4,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		/* Set L3 offset */
		mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
					  MVPP2_ETH_TYPE_LEN,
					  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
		/* Set L4 offset */
		mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
					  MVPP2_ETH_TYPE_LEN + (ihl * 4),
					  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
		mvpp2_prs_hw_write(priv, &pe);
	}

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Jump to DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE: no ethertype match, so this fires last */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1726 
/* Initialize entries for IPv4.
 *
 * Installs the IPv4 LU-stage entries: TCP/UDP/IGMP protocol matches,
 * broadcast and multicast DIP classes, the unknown-protocol default,
 * and the unicast-address default that loops back for protocol
 * parsing. Returns 0 on success or a negative errno from the helpers.
 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	/* IGMP is routed to the CPU as a special frame */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	/* Only hit on the second IPv4 pass (DIP ai bit already set) */
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Go again to ipv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);

	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Shift back to IPv4 proto */
	mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	/* Only hit on the first IPv4 pass (DIP ai bit not yet set) */
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1814 
1815 /* Initialize entries for IPv6 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	/* ICMPv6 is steered to the CPU via a special code and UDF3 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Drop frames whose hop limit is zero (byte 1 matched against 0) */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry.
	 * NOTE(review): the shadow LU is recorded as MVPP2_PRS_LU_IP4 even
	 * though the TCAM LU above is LU_IP6 -- looks like a copy/paste
	 * slip, but shadow-based scans key off this value, so confirm
	 * before changing it.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relatively to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry.
	 * NOTE(review): same LU_IP4-vs-LU_IP6 question as above.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry.
	 * NOTE(review): same LU_IP4-vs-LU_IP6 question as above.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1949 
1950 /* Find tcam entry with matched pair <vid,port> */
/* Find tcam entry with matched pair <vid,port>.
 * Returns the TID of the matching VID-filter entry in this port's range,
 * or -ENOENT if none matches. Caller must hold priv->prs_spinlock (the
 * hw read helper asserts it).
 */
static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
{
	struct mvpp2 *priv = port->priv;
	unsigned char data[2], en[2];
	struct mvpp2_prs_entry pe;
	u16 found_vid, found_mask;
	int tid;

	/* Walk this port's VID range, considering only valid LU_VID entries */
	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
		if (!priv->prs_shadow[tid].valid)
			continue;
		if (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
			continue;

		__mvpp2_prs_init_from_hw(priv, &pe, tid);

		/* The 12-bit VID lives in TCAM data bytes 2 and 3 */
		mvpp2_prs_tcam_data_byte_get(&pe, 2, &data[0], &en[0]);
		mvpp2_prs_tcam_data_byte_get(&pe, 3, &data[1], &en[1]);

		found_vid = ((data[0] & 0xf) << 8) + data[1];
		found_mask = ((en[0] & 0xf) << 8) + en[1];

		if (found_vid == vid && found_mask == mask)
			return tid;
	}

	return -ENOENT;
}
1981 
1982 /* Write parser entry for VID filtering */
int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
{
	unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
				 port->id * MVPP2_PRS_VLAN_FILT_MAX;
	unsigned int mask = 0xfff, reg_val, shift;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	spin_lock_bh(&priv->prs_spinlock);

	/* Scan TCAM and see if entry with this <vid,port> already exist */
	tid = mvpp2_prs_vid_range_find(port, vid, mask);

	/* Tag length to skip depends on the port's DSA mode */
	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	/* No such entry */
	if (tid < 0) {

		/* Go through all entries from first to last in vlan range */
		tid = mvpp2_prs_tcam_first_free(priv, vid_start,
						vid_start +
						MVPP2_PRS_VLAN_FILT_MAX_ENTRY);

		/* There isn't room for a new VID filter */
		if (tid < 0) {
			spin_unlock_bh(&priv->prs_spinlock);
			return tid;
		}

		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		/* Entry exists: reload it and just enable this port on it */
		__mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Enable the current port */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Set match on VID */
	mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	spin_unlock_bh(&priv->prs_spinlock);
	return 0;
}
2050 
2051 /* Write parser entry for VID filtering */
/* Remove the VID filtering entry matching <vid,port>, if one exists */
void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
{
	struct mvpp2 *priv = port->priv;
	int tid;

	spin_lock_bh(&priv->prs_spinlock);

	/* Look the entry up with a fully-enabled (0xfff) VID mask */
	tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
	if (tid < 0)
		goto out;

	/* Invalidate in hardware and in the shadow table */
	mvpp2_prs_hw_inv(priv, tid);
	priv->prs_shadow[tid].valid = false;
out:
	spin_unlock_bh(&priv->prs_spinlock);
}
2068 
2069 /* Remove all existing VID filters on this port */
mvpp2_prs_vid_remove_all(struct mvpp2_port * port)2070 void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
2071 {
2072 	struct mvpp2 *priv = port->priv;
2073 	int tid;
2074 
2075 	spin_lock_bh(&priv->prs_spinlock);
2076 
2077 	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
2078 	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
2079 		if (priv->prs_shadow[tid].valid) {
2080 			mvpp2_prs_hw_inv(priv, tid);
2081 			priv->prs_shadow[tid].valid = false;
2082 		}
2083 	}
2084 
2085 	spin_unlock_bh(&priv->prs_spinlock);
2086 }
2087 
2088 /* Remove VID filering entry for this port */
mvpp2_prs_vid_disable_filtering(struct mvpp2_port * port)2089 void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
2090 {
2091 	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
2092 	struct mvpp2 *priv = port->priv;
2093 
2094 	spin_lock_bh(&priv->prs_spinlock);
2095 
2096 	/* Invalidate the guard entry */
2097 	mvpp2_prs_hw_inv(priv, tid);
2098 
2099 	priv->prs_shadow[tid].valid = false;
2100 
2101 	spin_unlock_bh(&priv->prs_spinlock);
2102 }
2103 
2104 /* Add guard entry that drops packets when no VID is matched on this port */
mvpp2_prs_vid_enable_filtering(struct mvpp2_port * port)2105 void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
2106 {
2107 	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
2108 	struct mvpp2 *priv = port->priv;
2109 	unsigned int reg_val, shift;
2110 	struct mvpp2_prs_entry pe;
2111 
2112 	if (priv->prs_shadow[tid].valid)
2113 		return;
2114 
2115 	memset(&pe, 0, sizeof(pe));
2116 
2117 	spin_lock_bh(&priv->prs_spinlock);
2118 
2119 	pe.index = tid;
2120 
2121 	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
2122 	if (reg_val & MVPP2_DSA_EXTENDED)
2123 		shift = MVPP2_VLAN_TAG_EDSA_LEN;
2124 	else
2125 		shift = MVPP2_VLAN_TAG_LEN;
2126 
2127 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2128 
2129 	/* Mask all ports */
2130 	mvpp2_prs_tcam_port_map_set(&pe, 0);
2131 
2132 	/* Update port mask */
2133 	mvpp2_prs_tcam_port_set(&pe, port->id, true);
2134 
2135 	/* Continue - set next lookup */
2136 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2137 
2138 	/* Skip VLAN header - Set offset to 4 or 8 bytes */
2139 	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2140 
2141 	/* Drop VLAN packets that don't belong to any VIDs on this port */
2142 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2143 				 MVPP2_PRS_RI_DROP_MASK);
2144 
2145 	/* Clear all ai bits for next iteration */
2146 	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2147 
2148 	/* Update shadow table */
2149 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2150 	mvpp2_prs_hw_write(priv, &pe);
2151 
2152 	spin_unlock_bh(&priv->prs_spinlock);
2153 }
2154 
2155 /* Parser default initialization */
mvpp2_prs_default_init(struct platform_device * pdev,struct mvpp2 * priv)2156 int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
2157 {
2158 	int err, index, i;
2159 
2160 	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2161 					sizeof(*priv->prs_shadow),
2162 					GFP_KERNEL);
2163 	if (!priv->prs_shadow)
2164 		return -ENOMEM;
2165 
2166 	priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
2167 					      MVPP2_PRS_DBL_VLANS_MAX,
2168 					      GFP_KERNEL);
2169 	if (!priv->prs_double_vlans)
2170 		return -ENOMEM;
2171 
2172 	spin_lock_bh(&priv->prs_spinlock);
2173 
2174 	/* Enable tcam table */
2175 	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2176 
2177 	/* Clear all tcam and sram entries */
2178 	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2179 		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2180 		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2181 			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2182 
2183 		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2184 		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2185 			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2186 	}
2187 
2188 	/* Invalidate all tcam entries */
2189 	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2190 		mvpp2_prs_hw_inv(priv, index);
2191 
2192 	/* Always start from lookup = 0 */
2193 	for (index = 0; index < MVPP2_MAX_PORTS; index++)
2194 		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2195 				       MVPP2_PRS_PORT_LU_MAX, 0);
2196 
2197 	mvpp2_prs_def_flow_init(priv);
2198 
2199 	mvpp2_prs_mh_init(priv);
2200 
2201 	mvpp2_prs_mac_init(priv);
2202 
2203 	mvpp2_prs_dsa_init(priv);
2204 
2205 	mvpp2_prs_vid_init(priv);
2206 
2207 	err = mvpp2_prs_etype_init(priv);
2208 	err = err ? : mvpp2_prs_vlan_init(pdev, priv);
2209 	err = err ? : mvpp2_prs_pppoe_init(priv);
2210 	err = err ? : mvpp2_prs_ip6_init(priv);
2211 	err = err ? : mvpp2_prs_ip4_init(priv);
2212 
2213 	spin_unlock_bh(&priv->prs_spinlock);
2214 	return err;
2215 }
2216 
2217 /* Compare MAC DA with tcam entry data */
mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry * pe,const u8 * da,unsigned char * mask)2218 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2219 				       const u8 *da, unsigned char *mask)
2220 {
2221 	unsigned char tcam_byte, tcam_mask;
2222 	int index;
2223 
2224 	for (index = 0; index < ETH_ALEN; index++) {
2225 		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2226 		if (tcam_mask != mask[index])
2227 			return false;
2228 
2229 		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2230 			return false;
2231 	}
2232 
2233 	return true;
2234 }
2235 
2236 /* Find tcam entry with matched pair <MAC DA, port> */
/* Find tcam entry with matched pair <MAC DA, port>.
 * Returns the TID of the MAC-range entry whose DA/mask and port map match,
 * or -ENOENT. Caller must hold priv->prs_spinlock (the hw read helper
 * asserts it).
 */
static int
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through the all entires with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_MAC_RANGE_START;
	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		__mvpp2_prs_init_from_hw(priv, &pe, tid);
		entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);

		/* Both the DA bytes and the exact port map must match */
		if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
		    entry_pmap == pmap)
			return tid;
	}

	return -ENOENT;
}
2264 
2265 /* Update parser's mac da entry */
/* Update parser's mac da entry.
 * Adds (@add) or removes this port on the TCAM entry that exactly matches
 * @da; allocates a new entry on first add and invalidates the entry once
 * no port is left enabled. Must be called with priv->prs_spinlock held
 * (the hw accessors assert it). Returns 0 or a negative errno.
 */
static int __mvpp2_prs_mac_da_accept(struct mvpp2_port *port,
				     const u8 *da, bool add)
{
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct mvpp2 *priv = port->priv;
	unsigned int pmap, len, ri;
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Scan TCAM and see if entry with this <MAC DA, port> already exist */
	tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
					  MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (tid < 0) {
		/* Removing a non-existent entry is a successful no-op */
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Go through the all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv,
						MVPP2_PE_MAC_RANGE_START,
						MVPP2_PE_MAC_RANGE_END);
		if (tid < 0)
			return tid;

		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		__mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(&pe);
	if (pmap == 0) {
		/* An add that leaves the map empty cannot happen normally */
		if (add)
			return -EINVAL;

		mvpp2_prs_hw_inv(priv, pe.index);
		priv->prs_shadow[pe.index].valid = false;
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_BCAST;
	} else if (is_multicast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_MCAST;
	} else {
		ri = MVPP2_PRS_RI_L2_UCAST;

		/* Frames to the port's own address are flagged MAC_ME */
		if (ether_addr_equal(da, port->dev->dev_addr))
			ri |= MVPP2_PRS_RI_MAC_ME_MASK;
	}

	mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
2354 
/* Locked wrapper around __mvpp2_prs_mac_da_accept() */
int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
{
	struct mvpp2 *priv = port->priv;
	int ret;

	spin_lock_bh(&priv->prs_spinlock);
	ret = __mvpp2_prs_mac_da_accept(port, da, add);
	spin_unlock_bh(&priv->prs_spinlock);

	return ret;
}
2365 
mvpp2_prs_update_mac_da(struct net_device * dev,const u8 * da)2366 int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
2367 {
2368 	struct mvpp2_port *port = netdev_priv(dev);
2369 	int err;
2370 
2371 	/* Remove old parser entry */
2372 	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
2373 	if (err)
2374 		return err;
2375 
2376 	/* Add new parser entry */
2377 	err = mvpp2_prs_mac_da_accept(port, da, true);
2378 	if (err)
2379 		return err;
2380 
2381 	/* Set addr in the device */
2382 	eth_hw_addr_set(dev, da);
2383 
2384 	return 0;
2385 }
2386 
/* Remove all MAC-DA parser entries active on this port, except the
 * broadcast address and the port's own address.
 */
void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	unsigned long pmap;
	int index, tid;

	spin_lock_bh(&priv->prs_spinlock);

	for (tid = MVPP2_PE_MAC_RANGE_START;
	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		/* Only valid default-UDF MAC entries are candidates */
		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		__mvpp2_prs_init_from_hw(priv, &pe, tid);

		pmap = mvpp2_prs_tcam_port_map_get(&pe);

		/* We only want entries active on this port */
		if (!test_bit(port->id, &pmap))
			continue;

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		/* Special cases : Don't remove broadcast and port's own
		 * address
		 */
		if (is_broadcast_ether_addr(da) ||
		    ether_addr_equal(da, port->dev->dev_addr))
			continue;

		/* Remove entry from TCAM (lock already held, use __ variant) */
		__mvpp2_prs_mac_da_accept(port, da, false);
	}

	spin_unlock_bh(&priv->prs_spinlock);
}
2431 
/* Configure the DSA tagging mode of @port in the parser.
 * Each known mode adds the port to the matching DSA/EDSA entries and
 * removes it from the others. Returns 0, or -EINVAL for a @type outside
 * the known range. NOTE(review): an in-range but unhandled @type falls
 * through the default case and silently returns 0 -- confirm that this
 * is intentional.
 */
int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		spin_lock_bh(&priv->prs_spinlock);
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		spin_unlock_bh(&priv->prs_spinlock);
		break;

	case MVPP2_TAG_TYPE_DSA:
		spin_lock_bh(&priv->prs_spinlock);
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		spin_unlock_bh(&priv->prs_spinlock);
		break;

	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		spin_lock_bh(&priv->prs_spinlock);
		/* Remove port form EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		spin_unlock_bh(&priv->prs_spinlock);
		break;

	default:
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}
2487 
/* Add a flow entry matching result-info bits @ri under @ri_mask, tagging
 * matching frames with flow id @flow. Returns 0 or a negative errno when
 * no free TCAM entry is available.
 */
int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
{
	struct mvpp2_prs_entry pe;
	u8 *ri_byte, *ri_byte_mask;
	int tid, i;

	memset(&pe, 0, sizeof(pe));

	spin_lock_bh(&priv->prs_spinlock);

	/* Range is passed as <last, first> so flow entries are allocated
	 * from the end of the free range -- same scheme as
	 * mvpp2_prs_def_flow().
	 */
	tid = mvpp2_prs_tcam_first_free(priv,
					MVPP2_PE_LAST_FREE_TID,
					MVPP2_PE_FIRST_FREE_TID);
	if (tid < 0) {
		spin_unlock_bh(&priv->prs_spinlock);
		return tid;
	}

	pe.index = tid;

	/* View ri/ri_mask as raw bytes for the per-byte TCAM match below
	 * (NOTE(review): byte order follows host endianness -- presumably
	 * matched by the hw layout; confirm against the RI write path)
	 */
	ri_byte = (u8 *)&ri;
	ri_byte_mask = (u8 *)&ri_mask;

	mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

	for (i = 0; i < 4; i++) {
		mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i],
					     ri_byte_mask[i]);
	}

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	spin_unlock_bh(&priv->prs_spinlock);
	return 0;
}
2527 
2528 /* Set prs flow for the port */
/* Set prs flow for the port: install (or update) the default flow entry
 * whose flow id is the port id, and restrict it to this port only.
 * Returns 0 or a negative errno when no free TCAM entry is available.
 */
int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	spin_lock_bh(&port->priv->prs_spinlock);

	tid = mvpp2_prs_flow_find(port->priv, port->id);

	/* Such entry not exist */
	if (tid < 0) {
		/* Go through the all entires from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
					       MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0) {
			spin_unlock_bh(&port->priv->prs_spinlock);
			return tid;
		}

		pe.index = tid;

		/* Set flow ID*/
		mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
	} else {
		/* Entry exists: reload it before refreshing LU and port map */
		__mvpp2_prs_init_from_hw(port->priv, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, &pe);

	spin_unlock_bh(&port->priv->prs_spinlock);
	return 0;
}
2570 
mvpp2_prs_hits(struct mvpp2 * priv,int index)2571 int mvpp2_prs_hits(struct mvpp2 *priv, int index)
2572 {
2573 	u32 val;
2574 
2575 	if (index > MVPP2_PRS_TCAM_SRAM_SIZE)
2576 		return -EINVAL;
2577 
2578 	spin_lock_bh(&priv->prs_spinlock);
2579 
2580 	mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
2581 
2582 	val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
2583 
2584 	val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
2585 
2586 	spin_unlock_bh(&priv->prs_spinlock);
2587 	return val;
2588 }
2589