/* Applied Micro X-Gene SoC Ethernet Classifier structures
 *
 * Copyright (c) 2016, Applied Micro Circuits Corporation
 * Authors: Khuong Dinh <kdinh@apm.com>
 *          Tanmay Inamdar <tinamdar@apm.com>
 *          Iyappan Subramanian <isubramanian@apm.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"

/* interfaces to convert structures to HW recognized bit formats */
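/* Pack the IP fragment flag, protocol version, protocol type and header
 * length into one sideband descriptor; callers store two such 16-bit
 * descriptors per 32-bit PKT_RAM word.
 */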
static void xgene_cle_sband_to_hw(u8 frag, enum xgene_cle_prot_version ver,
				  enum xgene_cle_prot_type type, u32 len,
				  u32 *reg)
{
	*reg =  SET_VAL(SB_IPFRAG, frag) |
		SET_VAL(SB_IPPROT, type) |
		SET_VAL(SB_IPVER, ver) |
		SET_VAL(SB_HDRLEN, len);
}

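/* Encode one RSS indirection-table entry.  X-Gene 1 (ENET1) uses a
 * different bit layout for the free-pool selectors (IDT_FPSEL1/
 * IDT_NFPSEL1) than later SoC revisions.
 */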
static void xgene_cle_idt_to_hw(struct xgene_enet_pdata *pdata,
				u32 dstqid, u32 fpsel,
				u32 nfpsel, u32 *idt_reg)
{
	if (pdata->enet_id == XGENE_ENET1) {
		*idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
			   SET_VAL(IDT_FPSEL1, fpsel)  |
			   SET_VAL(IDT_NFPSEL1, nfpsel);
	} else {
		*idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
			   SET_VAL(IDT_FPSEL, fpsel)   |
			   SET_VAL(IDT_NFPSEL, nfpsel);
	}
}

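/* Serialize a result-database pointer into its 6-word DB_RAM format.
 * Words 1-3 stay zero; the destination queue id straddles buf[4]
 * (low bits) and buf[5] (high bits).
 */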
static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
				  struct xgene_cle_dbptr *dbptr, u32 *buf)
{
	buf[0] = SET_VAL(CLE_DROP, dbptr->drop);
	buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) |
		 SET_VAL(CLE_NFPSEL, dbptr->nxtfpsel) |
		 SET_VAL(CLE_DSTQIDL, dbptr->dstqid);

	buf[5] = SET_VAL(CLE_DSTQIDH, (u32)dbptr->dstqid >> CLE_DSTQIDL_LEN) |
		 SET_VAL(CLE_PRIORITY, dbptr->cle_priority);
}

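/* Serialize a key node: after the type word, each 32-bit word carries
 * two (priority, result pointer) pairs, the odd-indexed key in the
 * upper 16 bits.
 */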
static void xgene_cle_kn_to_hw(struct xgene_cle_ptree_kn *kn, u32 *buf)
{
	u32 i, j = 0;
	u32 data;

	buf[j++] = SET_VAL(CLE_TYPE, kn->node_type);
	for (i = 0; i < kn->num_keys; i++) {
		struct xgene_cle_ptree_key *key = &kn->key[i];

		if (!(i % 2)) {
			buf[j] = SET_VAL(CLE_KN_PRIO, key->priority) |
				 SET_VAL(CLE_KN_RPTR, key->result_pointer);
		} else {
			data = SET_VAL(CLE_KN_PRIO, key->priority) |
			       SET_VAL(CLE_KN_RPTR, key->result_pointer);
			buf[j++] |= (data << 16);
		}
	}
}

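/* Serialize a decision node: one configuration word followed by two
 * words per branch.  Absolute jump targets that point into packet RAM
 * are rebased by @jb, the per-port jump-byte offset.
 */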
static void xgene_cle_dn_to_hw(const struct xgene_cle_ptree_ewdn *dn,
			       u32 *buf, u32 jb)
{
	const struct xgene_cle_ptree_branch *br;
	u32 i, j = 0;
	u32 npp;

	buf[j++] = SET_VAL(CLE_DN_TYPE, dn->node_type) |
		   SET_VAL(CLE_DN_LASTN, dn->last_node) |
		   SET_VAL(CLE_DN_HLS, dn->hdr_len_store) |
		   SET_VAL(CLE_DN_EXT, dn->hdr_extn) |
		   SET_VAL(CLE_DN_BSTOR, dn->byte_store) |
		   SET_VAL(CLE_DN_SBSTOR, dn->search_byte_store) |
		   SET_VAL(CLE_DN_RPTR, dn->result_pointer);

	for (i = 0; i < dn->num_branches; i++) {
		br = &dn->branch[i];
		npp = br->next_packet_pointer;

		if ((br->jump_rel == JMP_ABS) && (npp < CLE_PKTRAM_SIZE))
			npp += jb;

		buf[j++] = SET_VAL(CLE_BR_VALID, br->valid) |
			   SET_VAL(CLE_BR_NPPTR, npp) |
			   SET_VAL(CLE_BR_JB, br->jump_bw) |
			   SET_VAL(CLE_BR_JR, br->jump_rel) |
			   SET_VAL(CLE_BR_OP, br->operation) |
			   SET_VAL(CLE_BR_NNODE, br->next_node) |
			   SET_VAL(CLE_BR_NBR, br->next_branch);

		buf[j++] = SET_VAL(CLE_BR_DATA, br->data) |
			   SET_VAL(CLE_BR_MASK, br->mask);
	}
}

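/* Poll for completion of an indirect command, checking every 1-2 ms
 * and giving up after ten tries.
 */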
static int xgene_cle_poll_cmd_done(void __iomem *base,
				   enum xgene_cle_cmd_type cmd)
{
	u32 status, loop = 10;
	int ret = -EBUSY;

	while (loop--) {
		status = ioread32(base + INDCMD_STATUS);
		if (status & cmd) {
			ret = 0;
			break;
		}
		usleep_range(1000, 2000);
	}

	return ret;
}

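/* Indirect write to classifier DRAM: program the address, load up to
 * @nregs data registers, issue the command and poll for completion.
 * Regions below PTREE_RAM are per parser, so the write is repeated
 * for each active parser.
 */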
static int xgene_cle_dram_wr(struct xgene_enet_cle *cle, u32 *data, u8 nregs,
			     u32 index, enum xgene_cle_dram_type type,
			     enum xgene_cle_cmd_type cmd)
{
	enum xgene_cle_parser parser = cle->active_parser;
	void __iomem *base = cle->base;
	u32 i, j, ind_addr;
	u8 port, nparsers;
	int ret = 0;

	/* PTREE_RAM onwards, DRAM regions are common for all parsers */
	nparsers = (type >= PTREE_RAM) ? 1 : cle->parsers;

	for (i = 0; i < nparsers; i++) {
		port = i;
		if ((type < PTREE_RAM) && (parser != PARSER_ALL))
			port = parser;

		ind_addr = XGENE_CLE_DRAM(type + (port * 4)) | index;
		iowrite32(ind_addr, base + INDADDR);
		for (j = 0; j < nregs; j++)
			iowrite32(data[j], base + DATA_RAM0 + (j * 4));
		iowrite32(cmd, base + INDCMD);

		ret = xgene_cle_poll_cmd_done(base, cmd);
		if (ret)
			break;
	}

	return ret;
}

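/* Start classification by programming the start node and the start
 * packet pointer for each active parser port.
 */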
static void xgene_cle_enable_ptree(struct xgene_enet_pdata *pdata,
				   struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	void __iomem *addr, *base = cle->base;
	u32 offset = CLE_PORT_OFFSET;
	u32 i;

	/* 1G port has to advance 4 bytes and 10G has to advance 8 bytes */
	ptree->start_pkt += cle->jump_bytes;
	for (i = 0; i < cle->parsers; i++) {
		if (cle->active_parser != PARSER_ALL)
			addr = base + cle->active_parser * offset;
		else
			addr = base + (i * offset);

		iowrite32(ptree->start_node & 0x3fff, addr + SNPTR0);
		iowrite32(ptree->start_pkt & 0x1ff, addr + SPPTR0);
	}
}

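/* Write all result-database pointers of the tree into DB_RAM */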
static int xgene_cle_setup_dbptr(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	u32 buf[CLE_DRAM_REGS];
	u32 i;
	int ret;

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < ptree->num_dbptr; i++) {
		xgene_cle_dbptr_to_hw(pdata, &ptree->dbptr[i], buf);
		ret = xgene_cle_dram_wr(cle, buf, 6, i + ptree->start_dbptr,
					DB_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	return 0;
}

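/* Static parse tree: match the IPv4 ethertype, branch on the L4
 * protocol, then walk the source/destination addresses and ports so
 * their bytes are captured for the RSS hash, ending at the last node.
 */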
static const struct xgene_cle_ptree_ewdn xgene_init_ptree_dn[] = {
	{
		/* PKT_TYPE_NODE */
		.node_type = EWDN,
		.last_node = 0,
		.hdr_len_store = 1,
		.hdr_extn = NO_BYTE,
		.byte_store = NO_BYTE,
		.search_byte_store = NO_BYTE,
		.result_pointer = DB_RES_DROP,
		.num_branches = 2,
		.branch = {
			{
				/* IPV4 */
				.valid = 1,
				.next_packet_pointer = 22,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = PKT_PROT_NODE,
				.next_branch = 0,
				.data = 0x8,
				.mask = 0x0
			},
			{
				.valid = 0,
				.next_packet_pointer = 262,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = LAST_NODE,
				.next_branch = 0,
				.data = 0x0,
				.mask = 0xffff
			}
		},
	},
	{
		/* PKT_PROT_NODE */
		.node_type = EWDN,
		.last_node = 0,
		.hdr_len_store = 1,
		.hdr_extn = NO_BYTE,
		.byte_store = NO_BYTE,
		.search_byte_store = NO_BYTE,
		.result_pointer = DB_RES_DROP,
		.num_branches = 3,
		.branch = {
			{
				/* TCP */
				.valid = 1,
				.next_packet_pointer = 26,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_TCP_NODE,
				.next_branch = 0,
				.data = 0x0600,
				.mask = 0x00ff
			},
			{
				/* UDP */
				.valid = 1,
				.next_packet_pointer = 26,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_UDP_NODE,
				.next_branch = 0,
				.data = 0x1100,
				.mask = 0x00ff
			},
			{
				.valid = 0,
				.next_packet_pointer = 26,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_OTHERS_NODE,
				.next_branch = 0,
				.data = 0x0,
				.mask = 0xffff
			}
		}
	},
	{
		/* RSS_IPV4_TCP_NODE */
		.node_type = EWDN,
		.last_node = 0,
		.hdr_len_store = 1,
		.hdr_extn = NO_BYTE,
		.byte_store = NO_BYTE,
		.search_byte_store = BOTH_BYTES,
		.result_pointer = DB_RES_DROP,
		.num_branches = 6,
		.branch = {
			{
				/* SRC IPV4 B01 */
				.valid = 0,
				.next_packet_pointer = 28,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_TCP_NODE,
				.next_branch = 1,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* SRC IPV4 B23 */
				.valid = 0,
				.next_packet_pointer = 30,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_TCP_NODE,
				.next_branch = 2,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* DST IPV4 B01 */
				.valid = 0,
				.next_packet_pointer = 32,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_TCP_NODE,
				.next_branch = 3,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* DST IPV4 B23 */
				.valid = 0,
				.next_packet_pointer = 34,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_TCP_NODE,
				.next_branch = 4,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* TCP SRC Port */
				.valid = 0,
				.next_packet_pointer = 36,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_TCP_NODE,
				.next_branch = 5,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* TCP DST Port */
				.valid = 0,
				.next_packet_pointer = 256,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = LAST_NODE,
				.next_branch = 0,
				.data = 0x0,
				.mask = 0xffff
			}
		}
	},
	{
		/* RSS_IPV4_UDP_NODE */
		.node_type = EWDN,
		.last_node = 0,
		.hdr_len_store = 1,
		.hdr_extn = NO_BYTE,
		.byte_store = NO_BYTE,
		.search_byte_store = BOTH_BYTES,
		.result_pointer = DB_RES_DROP,
		.num_branches = 6,
		.branch = {
			{
				/* SRC IPV4 B01 */
				.valid = 0,
				.next_packet_pointer = 28,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_UDP_NODE,
				.next_branch = 1,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* SRC IPV4 B23 */
				.valid = 0,
				.next_packet_pointer = 30,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_UDP_NODE,
				.next_branch = 2,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* DST IPV4 B01 */
				.valid = 0,
				.next_packet_pointer = 32,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_UDP_NODE,
				.next_branch = 3,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* DST IPV4 B23 */
				.valid = 0,
				.next_packet_pointer = 34,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_UDP_NODE,
				.next_branch = 4,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* UDP SRC Port */
				.valid = 0,
				.next_packet_pointer = 36,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_UDP_NODE,
				.next_branch = 5,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* UDP DST Port */
				.valid = 0,
				.next_packet_pointer = 258,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = LAST_NODE,
				.next_branch = 0,
				.data = 0x0,
				.mask = 0xffff
			}
		}
	},
	{
		/* RSS_IPV4_OTHERS_NODE */
		.node_type = EWDN,
		.last_node = 0,
		.hdr_len_store = 1,
		.hdr_extn = NO_BYTE,
		.byte_store = NO_BYTE,
		.search_byte_store = BOTH_BYTES,
		.result_pointer = DB_RES_DROP,
		.num_branches = 6,
		.branch = {
			{
				/* SRC IPV4 B01 */
				.valid = 0,
				.next_packet_pointer = 28,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_OTHERS_NODE,
				.next_branch = 1,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* SRC IPV4 B23 */
				.valid = 0,
				.next_packet_pointer = 30,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_OTHERS_NODE,
				.next_branch = 2,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* DST IPV4 B01 */
				.valid = 0,
				.next_packet_pointer = 32,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_OTHERS_NODE,
				.next_branch = 3,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* DST IPV4 B23 */
				.valid = 0,
				.next_packet_pointer = 34,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_OTHERS_NODE,
				.next_branch = 4,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* L4 SRC Port */
				.valid = 0,
				.next_packet_pointer = 36,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_OTHERS_NODE,
				.next_branch = 5,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* L4 DST Port */
				.valid = 0,
				.next_packet_pointer = 260,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = LAST_NODE,
				.next_branch = 0,
				.data = 0x0,
				.mask = 0xffff
			}
		}
	},

	{
		/* LAST NODE */
		.node_type = EWDN,
		.last_node = 1,
		.hdr_len_store = 1,
		.hdr_extn = NO_BYTE,
		.byte_store = NO_BYTE,
		.search_byte_store = NO_BYTE,
		.result_pointer = DB_RES_DROP,
		.num_branches = 1,
		.branch = {
			{
				.valid = 0,
				.next_packet_pointer = 0,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = MAX_NODES,
				.next_branch = 0,
				.data = 0,
				.mask = 0xffff
			}
		}
	}
};

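/* Write the decision nodes followed by the key nodes into PTREE_RAM,
 * continuing the node index so both occupy one contiguous range.
 */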
static int xgene_cle_setup_node(struct xgene_enet_pdata *pdata,
				struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	const struct xgene_cle_ptree_ewdn *dn = xgene_init_ptree_dn;
	int num_dn = ARRAY_SIZE(xgene_init_ptree_dn);
	struct xgene_cle_ptree_kn *kn = ptree->kn;
	u32 buf[CLE_DRAM_REGS];
	int i, j, ret;

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < num_dn; i++) {
		xgene_cle_dn_to_hw(&dn[i], buf, cle->jump_bytes);
		ret = xgene_cle_dram_wr(cle, buf, 17, i + ptree->start_node,
					PTREE_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	/* continue node index for key node */
	memset(buf, 0, sizeof(buf));
	for (j = i; j < (ptree->num_kn + num_dn); j++) {
		xgene_cle_kn_to_hw(&kn[j - num_dn], buf);
		ret = xgene_cle_dram_wr(cle, buf, 17, j + ptree->start_node,
					PTREE_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	return 0;
}

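/* Program the parse-tree nodes and the result database, then enable
 * the tree.
 */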
static int xgene_cle_setup_ptree(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_cle *cle)
{
	int ret;

	ret = xgene_cle_setup_node(pdata, cle);
	if (ret)
		return ret;

	ret = xgene_cle_setup_dbptr(pdata, cle);
	if (ret)
		return ret;

	xgene_cle_enable_ptree(pdata, cle);

	return 0;
}

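/* Program the default classification result via MMIO: the 6-word
 * result entry plus a pointer register holding the 3-bit priority and
 * the 10-bit result-database index.
 */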
static void xgene_cle_setup_def_dbptr(struct xgene_enet_pdata *pdata,
				      struct xgene_enet_cle *enet_cle,
				      struct xgene_cle_dbptr *dbptr,
				      u32 index, u8 priority)
{
	void __iomem *base = enet_cle->base;
	void __iomem *base_addr;
	u32 buf[CLE_DRAM_REGS];
	u32 def_cls, offset;
	u32 i, j;

	memset(buf, 0, sizeof(buf));
	xgene_cle_dbptr_to_hw(pdata, dbptr, buf);

	for (i = 0; i < enet_cle->parsers; i++) {
		if (enet_cle->active_parser != PARSER_ALL) {
			offset = enet_cle->active_parser *
				CLE_PORT_OFFSET;
		} else {
			offset = i * CLE_PORT_OFFSET;
		}

		base_addr = base + DFCLSRESDB00 + offset;
		for (j = 0; j < 6; j++)
			iowrite32(buf[j], base_addr + (j * 4));

		def_cls = ((priority & 0x7) << 10) | (index & 0x3ff);
		iowrite32(def_cls, base + DFCLSRESDBPTR0 + offset);
	}
}

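/* Program sideband descriptors for the supported packet types.  Each
 * hdr_len packs the MAC header length above bit 4 with the IPv4 IHL
 * (header length in 32-bit words) in the low five bits; two 16-bit
 * descriptors are stored per PKT_RAM word, right after the packet RAM
 * area (CLE_PKTRAM_SIZE).
 */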
static int xgene_cle_set_rss_sband(struct xgene_enet_cle *cle)
{
	u32 idx = CLE_PKTRAM_SIZE / sizeof(u32);
	u32 mac_hdr_len = ETH_HLEN;
	u32 sband, reg = 0;
	u32 ipv4_ihl = 5;
	u32 hdr_len;
	int ret;

	/* Sideband: IPv4/TCP packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_TCP, hdr_len, &reg);
	sband = reg;

	/* Sideband: IPv4/UDP packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(1, XGENE_CLE_IPV4, XGENE_CLE_UDP, hdr_len, &reg);
	sband |= (reg << 16);

	ret = xgene_cle_dram_wr(cle, &sband, 1, idx, PKT_RAM, CLE_CMD_WR);
	if (ret)
		return ret;

	/* Sideband: IPv4/RAW packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
			      hdr_len, &reg);
	sband = reg;

	/* Sideband: Ethernet II/RAW packets */
	hdr_len = (mac_hdr_len << 5);
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
			      hdr_len, &reg);
	sband |= (reg << 16);

	ret = xgene_cle_dram_wr(cle, &sband, 1, idx + 1, PKT_RAM, CLE_CMD_WR);
	if (ret)
		return ret;

	return 0;
}

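/* Seed the RSS hash with a random 16-byte secret key */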
static int xgene_cle_set_rss_skeys(struct xgene_enet_cle *cle)
{
	u32 secret_key_ipv4[4];  /* 16 bytes */
	int ret = 0;

	get_random_bytes(secret_key_ipv4, 16);
	ret = xgene_cle_dram_wr(cle, secret_key_ipv4, 4, 0,
				RSS_IPV4_HASH_SKEY, CLE_CMD_WR);
	return ret;
}

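/* Fill the RSS indirection table, spreading entries round-robin over
 * the receive rings, then program the hash secret key.
 */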
static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata)
{
	u32 fpsel, dstqid, nfpsel, idt_reg, idx;
	int i, ret = 0;
	u16 pool_id;

	for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) {
		idx = i % pdata->rxq_cnt;
		pool_id = pdata->rx_ring[idx]->buf_pool->id;
		fpsel = xgene_enet_get_fpsel(pool_id);
		dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]);
		nfpsel = 0;
		if (pdata->rx_ring[idx]->page_pool) {
			pool_id = pdata->rx_ring[idx]->page_pool->id;
			nfpsel = xgene_enet_get_fpsel(pool_id);
		}

		idt_reg = 0;
		xgene_cle_idt_to_hw(pdata, dstqid, fpsel, nfpsel, &idt_reg);
		ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i,
					RSS_IDT, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	ret = xgene_cle_set_rss_skeys(&pdata->cle);
	if (ret)
		return ret;

	return 0;
}

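/* Enable RSS in 12-byte IPv4 hash mode (RSS_IPV4_12B) on each active
 * parser, then set up the sideband data and the indirection table.
 */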
static int xgene_cle_setup_rss(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *cle = &pdata->cle;
	void __iomem *base = cle->base;
	u32 offset, val = 0;
	int i, ret = 0;

	offset = CLE_PORT_OFFSET;
	for (i = 0; i < cle->parsers; i++) {
		if (cle->active_parser != PARSER_ALL)
			offset = cle->active_parser * CLE_PORT_OFFSET;
		else
			offset = i * CLE_PORT_OFFSET;

		/* enable RSS */
		val = (RSS_IPV4_12B << 1) | 0x1;
		writel(val, base + RSS_CTRL0 + offset);
	}

	/* setup sideband data */
	ret = xgene_cle_set_rss_sband(cle);
	if (ret)
		return ret;

	/* setup indirection table */
	ret = xgene_cle_set_rss_idt(pdata);
	if (ret)
		return ret;

	return 0;
}

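/* Top-level classifier init, used only for the XGMII (10GbE)
 * interface: set up RSS, build the accept/default/drop result entries
 * and a single key node that accepts to the default queue, then
 * program the parse tree.
 */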
static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *enet_cle = &pdata->cle;
	u32 def_qid, def_fpsel, def_nxtfpsel, pool_id;
	struct xgene_cle_dbptr dbptr[DB_MAX_PTRS];
	struct xgene_cle_ptree *ptree;
	struct xgene_cle_ptree_kn kn;
	int ret;

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		return -EINVAL;

	ptree = &enet_cle->ptree;
	ptree->start_pkt = 12; /* Ethertype */

	ret = xgene_cle_setup_rss(pdata);
	if (ret) {
		netdev_err(pdata->ndev, "RSS initialization failed\n");
		return ret;
	}

	def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
	pool_id = pdata->rx_ring[0]->buf_pool->id;
	def_fpsel = xgene_enet_get_fpsel(pool_id);
	def_nxtfpsel = 0;
	if (pdata->rx_ring[0]->page_pool) {
		pool_id = pdata->rx_ring[0]->page_pool->id;
		def_nxtfpsel = xgene_enet_get_fpsel(pool_id);
	}

	memset(dbptr, 0, sizeof(struct xgene_cle_dbptr) * DB_MAX_PTRS);
	dbptr[DB_RES_ACCEPT].fpsel = def_fpsel;
	dbptr[DB_RES_ACCEPT].nxtfpsel = def_nxtfpsel;
	dbptr[DB_RES_ACCEPT].dstqid = def_qid;
	dbptr[DB_RES_ACCEPT].cle_priority = 1;

	dbptr[DB_RES_DEF].fpsel = def_fpsel;
	dbptr[DB_RES_DEF].nxtfpsel = def_nxtfpsel;
	dbptr[DB_RES_DEF].dstqid = def_qid;
	dbptr[DB_RES_DEF].cle_priority = 7;
	xgene_cle_setup_def_dbptr(pdata, enet_cle, &dbptr[DB_RES_DEF],
				  DB_RES_ACCEPT, 7);

	dbptr[DB_RES_DROP].drop = 1;

	memset(&kn, 0, sizeof(kn));
	kn.node_type = KN;
	kn.num_keys = 1;
	kn.key[0].priority = 0;
	kn.key[0].result_pointer = DB_RES_ACCEPT;

	ptree->kn = &kn;
	ptree->dbptr = dbptr;
	ptree->num_kn = 1;
	ptree->num_dbptr = DB_MAX_PTRS;

	return xgene_cle_setup_ptree(pdata, enet_cle);
}

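/* Classifier entry points used by the core driver */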
const struct xgene_cle_ops xgene_cle3in_ops = {
	.cle_init = xgene_enet_cle_init,
};