// SPDX-License-Identifier: GPL-2.0-or-later
/* Applied Micro X-Gene SoC Ethernet Classifier structures
 *
 * Copyright (c) 2016, Applied Micro Circuits Corporation
 * Authors: Khuong Dinh <kdinh@apm.com>
 *          Tanmay Inamdar <tinamdar@apm.com>
 *          Iyappan Subramanian <isubramanian@apm.com>
 */

#include "xgene_enet_main.h"

/* interfaces to convert structures to HW-recognized bit formats */
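
/* Pack one sideband entry (IP fragment flag, protocol version/type and
 * the encoded header length) into *reg; callers combine two such
 * entries per 32-bit PKT_RAM word.
 */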
static void xgene_cle_sband_to_hw(u8 frag, enum xgene_cle_prot_version ver,
				  enum xgene_cle_prot_type type, u32 len,
				  u32 *reg)
{
	*reg =  SET_VAL(SB_IPFRAG, frag) |
		SET_VAL(SB_IPPROT, type) |
		SET_VAL(SB_IPVER, ver) |
		SET_VAL(SB_HDRLEN, len);
}

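/* Encode one RSS indirection table entry. ENET1 uses a different
 * FPSEL/NFPSEL bit layout than later chips, hence the two variants.
 */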
static void xgene_cle_idt_to_hw(struct xgene_enet_pdata *pdata,
				u32 dstqid, u32 fpsel,
				u32 nfpsel, u32 *idt_reg)
{
	if (pdata->enet_id == XGENE_ENET1) {
		*idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
			   SET_VAL(IDT_FPSEL1, fpsel)  |
			   SET_VAL(IDT_NFPSEL1, nfpsel);
	} else {
		*idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
			   SET_VAL(IDT_FPSEL, fpsel)   |
			   SET_VAL(IDT_NFPSEL, nfpsel);
	}
}

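/* Encode a result DB entry. The destination queue id is split across
 * two registers: its low CLE_DSTQIDL_LEN bits land in buf[4], the
 * remaining high bits in buf[5].
 */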
static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
				  struct xgene_cle_dbptr *dbptr, u32 *buf)
{
	buf[0] = SET_VAL(CLE_DROP, dbptr->drop);
	buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) |
		 SET_VAL(CLE_NFPSEL, dbptr->nxtfpsel) |
		 SET_VAL(CLE_DSTQIDL, dbptr->dstqid);

	buf[5] = SET_VAL(CLE_DSTQIDH, (u32)dbptr->dstqid >> CLE_DSTQIDL_LEN) |
		 SET_VAL(CLE_PRIORITY, dbptr->cle_priority);
}

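/* Encode a key node: a node-type word followed by the keys, packed two
 * per 32-bit word. Each key is a 16-bit priority/result-pointer pair;
 * even-indexed keys fill the low half-word, odd-indexed the high.
 */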
static void xgene_cle_kn_to_hw(struct xgene_cle_ptree_kn *kn, u32 *buf)
{
	u32 i, j = 0;
	u32 data;

	buf[j++] = SET_VAL(CLE_TYPE, kn->node_type);
	for (i = 0; i < kn->num_keys; i++) {
		struct xgene_cle_ptree_key *key = &kn->key[i];

		if (!(i % 2)) {
			buf[j] = SET_VAL(CLE_KN_PRIO, key->priority) |
				 SET_VAL(CLE_KN_RPTR, key->result_pointer);
		} else {
			data = SET_VAL(CLE_KN_PRIO, key->priority) |
			       SET_VAL(CLE_KN_RPTR, key->result_pointer);
			buf[j++] |= (data << 16);
		}
	}
}

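/* Encode a decision node: one config word plus two words per branch
 * (match control, then data/mask). Absolute jump targets that fall
 * inside packet RAM are rebased by @jb, the port's jump-byte offset.
 */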
static void xgene_cle_dn_to_hw(const struct xgene_cle_ptree_ewdn *dn,
			       u32 *buf, u32 jb)
{
	const struct xgene_cle_ptree_branch *br;
	u32 i, j = 0;
	u32 npp;

	buf[j++] = SET_VAL(CLE_DN_TYPE, dn->node_type) |
		   SET_VAL(CLE_DN_LASTN, dn->last_node) |
		   SET_VAL(CLE_DN_HLS, dn->hdr_len_store) |
		   SET_VAL(CLE_DN_EXT, dn->hdr_extn) |
		   SET_VAL(CLE_DN_BSTOR, dn->byte_store) |
		   SET_VAL(CLE_DN_SBSTOR, dn->search_byte_store) |
		   SET_VAL(CLE_DN_RPTR, dn->result_pointer);

	for (i = 0; i < dn->num_branches; i++) {
		br = &dn->branch[i];
		npp = br->next_packet_pointer;

		if ((br->jump_rel == JMP_ABS) && (npp < CLE_PKTRAM_SIZE))
			npp += jb;

		buf[j++] = SET_VAL(CLE_BR_VALID, br->valid) |
			   SET_VAL(CLE_BR_NPPTR, npp) |
			   SET_VAL(CLE_BR_JB, br->jump_bw) |
			   SET_VAL(CLE_BR_JR, br->jump_rel) |
			   SET_VAL(CLE_BR_OP, br->operation) |
			   SET_VAL(CLE_BR_NNODE, br->next_node) |
			   SET_VAL(CLE_BR_NBR, br->next_branch);

		buf[j++] = SET_VAL(CLE_BR_DATA, br->data) |
			   SET_VAL(CLE_BR_MASK, br->mask);
	}
}

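/* Poll INDCMD_STATUS for command completion, up to ten times at 1-2 ms
 * intervals; returns -EBUSY on timeout.
 */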
static int xgene_cle_poll_cmd_done(void __iomem *base,
				   enum xgene_cle_cmd_type cmd)
{
	u32 status, loop = 10;
	int ret = -EBUSY;

	while (loop--) {
		status = ioread32(base + INDCMD_STATUS);
		if (status & cmd) {
			ret = 0;
			break;
		}
		usleep_range(1000, 2000);
	}

	return ret;
}

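/* Indirect write of @nregs words into classifier DRAM of @type at
 * @index: program INDADDR, load the data registers, issue the command
 * and poll for completion. Per-parser regions are written once for
 * each parser being configured.
 */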
static int xgene_cle_dram_wr(struct xgene_enet_cle *cle, u32 *data, u8 nregs,
			     u32 index, enum xgene_cle_dram_type type,
			     enum xgene_cle_cmd_type cmd)
{
	enum xgene_cle_parser parser = cle->active_parser;
	void __iomem *base = cle->base;
	u32 i, j, ind_addr;
	u8 port, nparsers;
	int ret = 0;

	/* PTREE_RAM onwards, DRAM regions are common for all parsers */
	nparsers = (type >= PTREE_RAM) ? 1 : cle->parsers;

	for (i = 0; i < nparsers; i++) {
		port = i;
		if ((type < PTREE_RAM) && (parser != PARSER_ALL))
			port = parser;

		ind_addr = XGENE_CLE_DRAM(type + (port * 4)) | index;
		iowrite32(ind_addr, base + INDADDR);
		for (j = 0; j < nregs; j++)
			iowrite32(data[j], base + DATA_RAM0 + (j * 4));
		iowrite32(cmd, base + INDCMD);

		ret = xgene_cle_poll_cmd_done(base, cmd);
		if (ret)
			break;
	}

	return ret;
}

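/* Start classification by programming each parser's start node and
 * start packet pointers.
 */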
static void xgene_cle_enable_ptree(struct xgene_enet_pdata *pdata,
				   struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	void __iomem *addr, *base = cle->base;
	u32 offset = CLE_PORT_OFFSET;
	u32 i;

	/* 1G port has to advance 4 bytes and 10G has to advance 8 bytes */
	ptree->start_pkt += cle->jump_bytes;
	for (i = 0; i < cle->parsers; i++) {
		if (cle->active_parser != PARSER_ALL)
			addr = base + cle->active_parser * offset;
		else
			addr = base + (i * offset);

		iowrite32(ptree->start_node & 0x3fff, addr + SNPTR0);
		iowrite32(ptree->start_pkt & 0x1ff, addr + SPPTR0);
	}
}

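/* Write all result DB entries of the tree into DB_RAM. */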
static int xgene_cle_setup_dbptr(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	u32 buf[CLE_DRAM_REGS];
	u32 i;
	int ret;

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < ptree->num_dbptr; i++) {
		xgene_cle_dbptr_to_hw(pdata, &ptree->dbptr[i], buf);
		ret = xgene_cle_dram_wr(cle, buf, 6, i + ptree->start_dbptr,
					DB_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	return 0;
}

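/* Default decision tree: match the IPv4 Ethertype, branch on the L4
 * protocol, then walk an RSS node that stores the source/destination
 * address and port bytes for hashing before ending in LAST_NODE.
 * Unmatched traffic falls through to LAST_NODE as well.
 */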
static const struct xgene_cle_ptree_ewdn xgene_init_ptree_dn[] = {
	{
		/* PKT_TYPE_NODE */
		.node_type = EWDN,
		.last_node = 0,
		.hdr_len_store = 1,
		.hdr_extn = NO_BYTE,
		.byte_store = NO_BYTE,
		.search_byte_store = NO_BYTE,
		.result_pointer = DB_RES_DROP,
		.num_branches = 2,
		.branch = {
			{
				/* IPV4 */
				.valid = 1,
				.next_packet_pointer = 22,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = PKT_PROT_NODE,
				.next_branch = 0,
				.data = 0x8,
				.mask = 0x0
			},
			{
				.valid = 0,
				.next_packet_pointer = 262,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = LAST_NODE,
				.next_branch = 0,
				.data = 0x0,
				.mask = 0xffff
			}
		},
	},
	{
		/* PKT_PROT_NODE */
		.node_type = EWDN,
		.last_node = 0,
		.hdr_len_store = 1,
		.hdr_extn = NO_BYTE,
		.byte_store = NO_BYTE,
		.search_byte_store = NO_BYTE,
		.result_pointer = DB_RES_DROP,
		.num_branches = 3,
		.branch = {
			{
				/* TCP */
				.valid = 1,
				.next_packet_pointer = 26,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_TCP_NODE,
				.next_branch = 0,
				.data = 0x0600,
				.mask = 0x00ff
			},
			{
				/* UDP */
				.valid = 1,
				.next_packet_pointer = 26,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_UDP_NODE,
				.next_branch = 0,
				.data = 0x1100,
				.mask = 0x00ff
			},
			{
				.valid = 0,
				.next_packet_pointer = 26,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_OTHERS_NODE,
				.next_branch = 0,
				.data = 0x0,
				.mask = 0xffff
			}
		}
	},
	{
		/* RSS_IPV4_TCP_NODE */
		.node_type = EWDN,
		.last_node = 0,
		.hdr_len_store = 1,
		.hdr_extn = NO_BYTE,
		.byte_store = NO_BYTE,
		.search_byte_store = BOTH_BYTES,
		.result_pointer = DB_RES_DROP,
		.num_branches = 6,
		.branch = {
			{
				/* SRC IPV4 B01 */
				.valid = 0,
				.next_packet_pointer = 28,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_TCP_NODE,
				.next_branch = 1,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* SRC IPV4 B23 */
				.valid = 0,
				.next_packet_pointer = 30,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_TCP_NODE,
				.next_branch = 2,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* DST IPV4 B01 */
				.valid = 0,
				.next_packet_pointer = 32,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_TCP_NODE,
				.next_branch = 3,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* DST IPV4 B23 */
				.valid = 0,
				.next_packet_pointer = 34,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_TCP_NODE,
				.next_branch = 4,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* TCP SRC Port */
				.valid = 0,
				.next_packet_pointer = 36,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_TCP_NODE,
				.next_branch = 5,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* TCP DST Port */
				.valid = 0,
				.next_packet_pointer = 256,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = LAST_NODE,
				.next_branch = 0,
				.data = 0x0,
				.mask = 0xffff
			}
		}
	},
	{
		/* RSS_IPV4_UDP_NODE */
		.node_type = EWDN,
		.last_node = 0,
		.hdr_len_store = 1,
		.hdr_extn = NO_BYTE,
		.byte_store = NO_BYTE,
		.search_byte_store = BOTH_BYTES,
		.result_pointer = DB_RES_DROP,
		.num_branches = 6,
		.branch = {
			{
				/* SRC IPV4 B01 */
				.valid = 0,
				.next_packet_pointer = 28,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_UDP_NODE,
				.next_branch = 1,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* SRC IPV4 B23 */
				.valid = 0,
				.next_packet_pointer = 30,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_UDP_NODE,
				.next_branch = 2,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* DST IPV4 B01 */
				.valid = 0,
				.next_packet_pointer = 32,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_UDP_NODE,
				.next_branch = 3,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* DST IPV4 B23 */
				.valid = 0,
				.next_packet_pointer = 34,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_UDP_NODE,
				.next_branch = 4,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* UDP SRC Port */
				.valid = 0,
				.next_packet_pointer = 36,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_UDP_NODE,
				.next_branch = 5,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* UDP DST Port */
				.valid = 0,
				.next_packet_pointer = 258,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = LAST_NODE,
				.next_branch = 0,
				.data = 0x0,
				.mask = 0xffff
			}
		}
	},
	{
		/* RSS_IPV4_OTHERS_NODE */
		.node_type = EWDN,
		.last_node = 0,
		.hdr_len_store = 1,
		.hdr_extn = NO_BYTE,
		.byte_store = NO_BYTE,
		.search_byte_store = BOTH_BYTES,
		.result_pointer = DB_RES_DROP,
		.num_branches = 6,
		.branch = {
			{
				/* SRC IPV4 B01 */
				.valid = 0,
				.next_packet_pointer = 28,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_OTHERS_NODE,
				.next_branch = 1,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* SRC IPV4 B23 */
				.valid = 0,
				.next_packet_pointer = 30,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_OTHERS_NODE,
				.next_branch = 2,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* DST IPV4 B01 */
				.valid = 0,
				.next_packet_pointer = 32,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_OTHERS_NODE,
				.next_branch = 3,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* DST IPV4 B23 */
				.valid = 0,
				.next_packet_pointer = 34,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_OTHERS_NODE,
				.next_branch = 4,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* L4 SRC Port */
				.valid = 0,
				.next_packet_pointer = 36,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_OTHERS_NODE,
				.next_branch = 5,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* L4 DST Port */
				.valid = 0,
				.next_packet_pointer = 260,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = LAST_NODE,
				.next_branch = 0,
				.data = 0x0,
				.mask = 0xffff
			}
		}
	},
	{
		/* LAST NODE */
		.node_type = EWDN,
		.last_node = 1,
		.hdr_len_store = 1,
		.hdr_extn = NO_BYTE,
		.byte_store = NO_BYTE,
		.search_byte_store = NO_BYTE,
		.result_pointer = DB_RES_DROP,
		.num_branches = 1,
		.branch = {
			{
				.valid = 0,
				.next_packet_pointer = 0,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = MAX_NODES,
				.next_branch = 0,
				.data = 0,
				.mask = 0xffff
			}
		}
	}
};

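/* Load the decision nodes, then the key nodes, into PTREE_RAM. */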
static int xgene_cle_setup_node(struct xgene_enet_pdata *pdata,
				struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	const struct xgene_cle_ptree_ewdn *dn = xgene_init_ptree_dn;
	int num_dn = ARRAY_SIZE(xgene_init_ptree_dn);
	struct xgene_cle_ptree_kn *kn = ptree->kn;
	u32 buf[CLE_DRAM_REGS];
	int i, j, ret;

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < num_dn; i++) {
		xgene_cle_dn_to_hw(&dn[i], buf, cle->jump_bytes);
		ret = xgene_cle_dram_wr(cle, buf, 17, i + ptree->start_node,
					PTREE_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	/* key nodes continue the node index after the decision nodes */
	memset(buf, 0, sizeof(buf));
	for (j = i; j < (ptree->num_kn + num_dn); j++) {
		xgene_cle_kn_to_hw(&kn[j - num_dn], buf);
		ret = xgene_cle_dram_wr(cle, buf, 17, j + ptree->start_node,
					PTREE_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	return 0;
}

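/* Program the complete tree: nodes, result DB entries, then enable
 * parsing.
 */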
static int xgene_cle_setup_ptree(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_cle *cle)
{
	int ret;

	ret = xgene_cle_setup_node(pdata, cle);
	if (ret)
		return ret;

	ret = xgene_cle_setup_dbptr(pdata, cle);
	if (ret)
		return ret;

	xgene_cle_enable_ptree(pdata, cle);

	return 0;
}

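/* Program the default classification result for each parser port: copy
 * the DB entry into the DFCLSRESDB registers and point DFCLSRESDBPTR0
 * at @index with the given 3-bit @priority.
 */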
static void xgene_cle_setup_def_dbptr(struct xgene_enet_pdata *pdata,
				      struct xgene_enet_cle *enet_cle,
				      struct xgene_cle_dbptr *dbptr,
				      u32 index, u8 priority)
{
	void __iomem *base = enet_cle->base;
	void __iomem *base_addr;
	u32 buf[CLE_DRAM_REGS];
	u32 def_cls, offset;
	u32 i, j;

	memset(buf, 0, sizeof(buf));
	xgene_cle_dbptr_to_hw(pdata, dbptr, buf);

	for (i = 0; i < enet_cle->parsers; i++) {
		if (enet_cle->active_parser != PARSER_ALL) {
			offset = enet_cle->active_parser *
				CLE_PORT_OFFSET;
		} else {
			offset = i * CLE_PORT_OFFSET;
		}

		base_addr = base + DFCLSRESDB00 + offset;
		for (j = 0; j < 6; j++)
			iowrite32(buf[j], base_addr + (j * 4));

		def_cls = ((priority & 0x7) << 10) | (index & 0x3ff);
		iowrite32(def_cls, base + DFCLSRESDBPTR0 + offset);
	}
}

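/* Program the per-packet-type sideband words just past the classifier
 * packet RAM. hdr_len encodes the MAC header length in its upper bits
 * and the IPv4 header length (in 32-bit words) in the low five bits.
 */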
static int xgene_cle_set_rss_sband(struct xgene_enet_cle *cle)
{
	u32 idx = CLE_PKTRAM_SIZE / sizeof(u32);
	u32 mac_hdr_len = ETH_HLEN;
	u32 sband, reg = 0;
	u32 ipv4_ihl = 5;
	u32 hdr_len;
	int ret;

	/* Sideband: IPv4/TCP packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_TCP, hdr_len, &reg);
	sband = reg;

	/* Sideband: IPv4/UDP packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(1, XGENE_CLE_IPV4, XGENE_CLE_UDP, hdr_len, &reg);
	sband |= (reg << 16);

	ret = xgene_cle_dram_wr(cle, &sband, 1, idx, PKT_RAM, CLE_CMD_WR);
	if (ret)
		return ret;

	/* Sideband: IPv4/RAW packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
			      hdr_len, &reg);
	sband = reg;

	/* Sideband: Ethernet II/RAW packets */
	hdr_len = (mac_hdr_len << 5);
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
			      hdr_len, &reg);
	sband |= (reg << 16);

	ret = xgene_cle_dram_wr(cle, &sband, 1, idx + 1, PKT_RAM, CLE_CMD_WR);
	if (ret)
		return ret;

	return 0;
}

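/* Seed the RSS hash with a random 16-byte secret key. */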
static int xgene_cle_set_rss_skeys(struct xgene_enet_cle *cle)
{
	u32 secret_key_ipv4[4];  /* 16 bytes */
	int ret = 0;

	get_random_bytes(secret_key_ipv4, 16);
	ret = xgene_cle_dram_wr(cle, secret_key_ipv4, 4, 0,
				RSS_IPV4_HASH_SKEY, CLE_CMD_WR);
	return ret;
}

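/* Fill the RSS indirection table, spreading entries round-robin across
 * the receive queues together with their buffer/page pool selectors,
 * then program the hash secret key.
 */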
static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata)
{
	u32 fpsel, dstqid, nfpsel, idt_reg, idx;
	int i, ret = 0;
	u16 pool_id;

	for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) {
		idx = i % pdata->rxq_cnt;
		pool_id = pdata->rx_ring[idx]->buf_pool->id;
		fpsel = xgene_enet_get_fpsel(pool_id);
		dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]);
		nfpsel = 0;
		if (pdata->rx_ring[idx]->page_pool) {
			pool_id = pdata->rx_ring[idx]->page_pool->id;
			nfpsel = xgene_enet_get_fpsel(pool_id);
		}

		idt_reg = 0;
		xgene_cle_idt_to_hw(pdata, dstqid, fpsel, nfpsel, &idt_reg);
		ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i,
					RSS_IDT, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	ret = xgene_cle_set_rss_skeys(&pdata->cle);
	if (ret)
		return ret;

	return 0;
}

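/* Enable RSS on every active parser (RSS_IPV4_12B presumably selects
 * the 12-byte src/dst address plus L4 port tuple as hash input), then
 * program the sideband data and the indirection table.
 */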
static int xgene_cle_setup_rss(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *cle = &pdata->cle;
	void __iomem *base = cle->base;
	u32 offset, val = 0;
	int i, ret = 0;

	offset = CLE_PORT_OFFSET;
	for (i = 0; i < cle->parsers; i++) {
		if (cle->active_parser != PARSER_ALL)
			offset = cle->active_parser * CLE_PORT_OFFSET;
		else
			offset = i * CLE_PORT_OFFSET;

		/* enable RSS */
		val = (RSS_IPV4_12B << 1) | 0x1;
		writel(val, base + RSS_CTRL0 + offset);
	}

	/* setup sideband data */
	ret = xgene_cle_set_rss_sband(cle);
	if (ret)
		return ret;

	/* setup indirection table */
	ret = xgene_cle_set_rss_idt(pdata);
	if (ret)
		return ret;

	return 0;
}

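/* Build and program the default classifier tree for the XGMII (10G)
 * port: RSS, default/accept/drop result DB entries and a single key
 * node steering accepted traffic to the default receive queue.
 */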
static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *enet_cle = &pdata->cle;
	u32 def_qid, def_fpsel, def_nxtfpsel, pool_id;
	struct xgene_cle_dbptr dbptr[DB_MAX_PTRS];
	struct xgene_cle_ptree *ptree;
	struct xgene_cle_ptree_kn kn;
	int ret;

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		return -EINVAL;

	ptree = &enet_cle->ptree;
	ptree->start_pkt = 12; /* Ethertype */

	ret = xgene_cle_setup_rss(pdata);
	if (ret) {
		netdev_err(pdata->ndev, "RSS initialization failed\n");
		return ret;
	}

	def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
	pool_id = pdata->rx_ring[0]->buf_pool->id;
	def_fpsel = xgene_enet_get_fpsel(pool_id);
	def_nxtfpsel = 0;
	if (pdata->rx_ring[0]->page_pool) {
		pool_id = pdata->rx_ring[0]->page_pool->id;
		def_nxtfpsel = xgene_enet_get_fpsel(pool_id);
	}

	memset(dbptr, 0, sizeof(struct xgene_cle_dbptr) * DB_MAX_PTRS);
	dbptr[DB_RES_ACCEPT].fpsel = def_fpsel;
	dbptr[DB_RES_ACCEPT].nxtfpsel = def_nxtfpsel;
	dbptr[DB_RES_ACCEPT].dstqid = def_qid;
	dbptr[DB_RES_ACCEPT].cle_priority = 1;

	dbptr[DB_RES_DEF].fpsel = def_fpsel;
	dbptr[DB_RES_DEF].nxtfpsel = def_nxtfpsel;
	dbptr[DB_RES_DEF].dstqid = def_qid;
	dbptr[DB_RES_DEF].cle_priority = 7;
	xgene_cle_setup_def_dbptr(pdata, enet_cle, &dbptr[DB_RES_DEF],
				  DB_RES_ACCEPT, 7);

	dbptr[DB_RES_DROP].drop = 1;

	memset(&kn, 0, sizeof(kn));
	kn.node_type = KN;
	kn.num_keys = 1;
	kn.key[0].priority = 0;
	kn.key[0].result_pointer = DB_RES_ACCEPT;

	ptree->kn = &kn;
	ptree->dbptr = dbptr;
	ptree->num_kn = 1;
	ptree->num_dbptr = DB_MAX_PTRS;

	return xgene_cle_setup_ptree(pdata, enet_cle);
}

const struct xgene_cle_ops xgene_cle3in_ops = {
	.cle_init = xgene_enet_cle_init,
};