/* Applied Micro X-Gene SoC Ethernet Classifier structures
 *
 * Copyright (c) 2016, Applied Micro Circuits Corporation
 * Authors: Khuong Dinh <kdinh@apm.com>
 *          Tanmay Inamdar <tinamdar@apm.com>
 *          Iyappan Subramanian <isubramanian@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"

/* interfaces to convert structures to HW recognized bit formats */
static void xgene_cle_sband_to_hw(u8 frag, enum xgene_cle_prot_version ver,
				  enum xgene_cle_prot_type type, u32 len,
				  u32 *reg)
{
	*reg = SET_VAL(SB_IPFRAG, frag) |
	       SET_VAL(SB_IPPROT, type) |
	       SET_VAL(SB_IPVER, ver) |
	       SET_VAL(SB_HDRLEN, len);
}

static void xgene_cle_idt_to_hw(struct xgene_enet_pdata *pdata,
				u32 dstqid, u32 fpsel,
				u32 nfpsel, u32 *idt_reg)
{
	if (pdata->enet_id == XGENE_ENET1) {
		*idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
			   SET_VAL(IDT_FPSEL1, fpsel) |
			   SET_VAL(IDT_NFPSEL1, nfpsel);
	} else {
		*idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
			   SET_VAL(IDT_FPSEL, fpsel) |
			   SET_VAL(IDT_NFPSEL, nfpsel);
	}
}

static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
				  struct xgene_cle_dbptr *dbptr, u32 *buf)
{
	buf[0] = SET_VAL(CLE_DROP, dbptr->drop);
	buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) |
		 SET_VAL(CLE_DSTQIDL, dbptr->dstqid);

	buf[5] = SET_VAL(CLE_DSTQIDH, (u32)dbptr->dstqid >> CLE_DSTQIDL_LEN) |
		 SET_VAL(CLE_PRIORITY, dbptr->cle_priority);
}

static void xgene_cle_kn_to_hw(struct xgene_cle_ptree_kn *kn, u32 *buf)
{
	u32 i, j = 0;
	u32 data;

	buf[j++] = SET_VAL(CLE_TYPE, kn->node_type);
	for (i = 0; i < kn->num_keys; i++) {
		struct xgene_cle_ptree_key *key = &kn->key[i];

		if (!(i % 2)) {
			buf[j] = SET_VAL(CLE_KN_PRIO, key->priority) |
				 SET_VAL(CLE_KN_RPTR, key->result_pointer);
		} else {
			data = SET_VAL(CLE_KN_PRIO, key->priority) |
			       SET_VAL(CLE_KN_RPTR, key->result_pointer);
			buf[j++] |= (data << 16);
		}
	}
}

static void xgene_cle_dn_to_hw(struct xgene_cle_ptree_ewdn *dn,
			       u32 *buf, u32 jb)
{
	struct xgene_cle_ptree_branch *br;
	u32 i, j = 0;
	u32 npp;

	buf[j++] = SET_VAL(CLE_DN_TYPE, dn->node_type) |
		   SET_VAL(CLE_DN_LASTN, dn->last_node) |
		   SET_VAL(CLE_DN_HLS, dn->hdr_len_store) |
		   SET_VAL(CLE_DN_EXT, dn->hdr_extn) |
		   SET_VAL(CLE_DN_BSTOR, dn->byte_store) |
		   SET_VAL(CLE_DN_SBSTOR, dn->search_byte_store) |
		   SET_VAL(CLE_DN_RPTR, dn->result_pointer);

	for (i = 0; i < dn->num_branches; i++) {
		br = &dn->branch[i];
		npp = br->next_packet_pointer;

		if ((br->jump_rel == JMP_ABS) && (npp < CLE_PKTRAM_SIZE))
			npp += jb;

		buf[j++] = SET_VAL(CLE_BR_VALID, br->valid) |
			   SET_VAL(CLE_BR_NPPTR, npp) |
			   SET_VAL(CLE_BR_JB, br->jump_bw) |
			   SET_VAL(CLE_BR_JR, br->jump_rel) |
			   SET_VAL(CLE_BR_OP, br->operation) |
			   SET_VAL(CLE_BR_NNODE, br->next_node) |
			   SET_VAL(CLE_BR_NBR, br->next_branch);

		buf[j++] = SET_VAL(CLE_BR_DATA, br->data) |
			   SET_VAL(CLE_BR_MASK, br->mask);
	}
}
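
/*
 * Note: the classifier RAMs are not memory mapped directly; they sit
 * behind an indirect access window.  A write goes: target address to
 * INDADDR, up to CLE_DRAM_REGS data words to the DATA_RAM registers,
 * then the command to INDCMD, after which INDCMD_STATUS is polled for
 * completion.  The two helpers below implement that sequence, and for
 * the per-port regions (everything below PTREE_RAM) xgene_cle_dram_wr()
 * replays the write once per parser.
 */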

static int xgene_cle_poll_cmd_done(void __iomem *base,
				   enum xgene_cle_cmd_type cmd)
{
	u32 status, loop = 10;
	int ret = -EBUSY;

	while (loop--) {
		status = ioread32(base + INDCMD_STATUS);
		if (status & cmd) {
			ret = 0;
			break;
		}
		usleep_range(1000, 2000);
	}

	return ret;
}

static int xgene_cle_dram_wr(struct xgene_enet_cle *cle, u32 *data, u8 nregs,
			     u32 index, enum xgene_cle_dram_type type,
			     enum xgene_cle_cmd_type cmd)
{
	enum xgene_cle_parser parser = cle->active_parser;
	void __iomem *base = cle->base;
	u32 i, j, ind_addr;
	u8 port, nparsers;
	int ret = 0;

	/* PTREE_RAM onwards, DRAM regions are common for all parsers */
	nparsers = (type >= PTREE_RAM) ? 1 : cle->parsers;

	for (i = 0; i < nparsers; i++) {
		port = i;
		if ((type < PTREE_RAM) && (parser != PARSER_ALL))
			port = parser;

		ind_addr = XGENE_CLE_DRAM(type + (port * 4)) | index;
		iowrite32(ind_addr, base + INDADDR);
		for (j = 0; j < nregs; j++)
			iowrite32(data[j], base + DATA_RAM0 + (j * 4));
		iowrite32(cmd, base + INDCMD);

		ret = xgene_cle_poll_cmd_done(base, cmd);
		if (ret)
			break;
	}

	return ret;
}

static void xgene_cle_enable_ptree(struct xgene_enet_pdata *pdata,
				   struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	void __iomem *addr, *base = cle->base;
	u32 offset = CLE_PORT_OFFSET;
	u32 i;

	/* 1G port has to advance 4 bytes and 10G has to advance 8 bytes */
	ptree->start_pkt += cle->jump_bytes;
	for (i = 0; i < cle->parsers; i++) {
		if (cle->active_parser != PARSER_ALL)
			addr = base + cle->active_parser * offset;
		else
			addr = base + (i * offset);

		iowrite32(ptree->start_node & 0x3fff, addr + SNPTR0);
		iowrite32(ptree->start_pkt & 0x1ff, addr + SPPTR0);
	}
}

static int xgene_cle_setup_dbptr(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	u32 buf[CLE_DRAM_REGS];
	u32 i;
	int ret;

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < ptree->num_dbptr; i++) {
		xgene_cle_dbptr_to_hw(pdata, &ptree->dbptr[i], buf);
		ret = xgene_cle_dram_wr(cle, buf, 6, i + ptree->start_dbptr,
					DB_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgene_cle_setup_node(struct xgene_enet_pdata *pdata,
				struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	struct xgene_cle_ptree_ewdn *dn = ptree->dn;
	struct xgene_cle_ptree_kn *kn = ptree->kn;
	u32 buf[CLE_DRAM_REGS];
	int i, j, ret;

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < ptree->num_dn; i++) {
		xgene_cle_dn_to_hw(&dn[i], buf, cle->jump_bytes);
		ret = xgene_cle_dram_wr(cle, buf, 17, i + ptree->start_node,
					PTREE_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	/* continue node index for key node */
	memset(buf, 0, sizeof(buf));
	for (j = i; j < (ptree->num_kn + ptree->num_dn); j++) {
		xgene_cle_kn_to_hw(&kn[j - ptree->num_dn], buf);
		ret = xgene_cle_dram_wr(cle, buf, 17, j + ptree->start_node,
					PTREE_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	return 0;
}
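
/*
 * Programming the patricia tree is a three step sequence: write the
 * decision and key nodes into PTREE_RAM, write the result database
 * entries into DB_RAM, and finally point each parser's start-node and
 * start-packet registers at the tree so classification goes live.
 */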

static int xgene_cle_setup_ptree(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_cle *cle)
{
	int ret;

	ret = xgene_cle_setup_node(pdata, cle);
	if (ret)
		return ret;

	ret = xgene_cle_setup_dbptr(pdata, cle);
	if (ret)
		return ret;

	xgene_cle_enable_ptree(pdata, cle);

	return 0;
}

static void xgene_cle_setup_def_dbptr(struct xgene_enet_pdata *pdata,
				      struct xgene_enet_cle *enet_cle,
				      struct xgene_cle_dbptr *dbptr,
				      u32 index, u8 priority)
{
	void __iomem *base = enet_cle->base;
	void __iomem *base_addr;
	u32 buf[CLE_DRAM_REGS];
	u32 def_cls, offset;
	u32 i, j;

	memset(buf, 0, sizeof(buf));
	xgene_cle_dbptr_to_hw(pdata, dbptr, buf);

	for (i = 0; i < enet_cle->parsers; i++) {
		if (enet_cle->active_parser != PARSER_ALL)
			offset = enet_cle->active_parser * CLE_PORT_OFFSET;
		else
			offset = i * CLE_PORT_OFFSET;

		base_addr = base + DFCLSRESDB00 + offset;
		for (j = 0; j < 6; j++)
			iowrite32(buf[j], base_addr + (j * 4));

		def_cls = ((priority & 0x7) << 10) | (index & 0x3ff);
		iowrite32(def_cls, base + DFCLSRESDBPTR0 + offset);
	}
}

static int xgene_cle_set_rss_sband(struct xgene_enet_cle *cle)
{
	u32 idx = CLE_PKTRAM_SIZE / sizeof(u32);
	u32 mac_hdr_len = ETH_HLEN;
	u32 sband, reg = 0;
	u32 ipv4_ihl = 5;
	u32 hdr_len;
	int ret;

	/* Sideband: IPv4/TCP packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_TCP, hdr_len, &reg);
	sband = reg;

	/* Sideband: IPv4/UDP packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(1, XGENE_CLE_IPV4, XGENE_CLE_UDP, hdr_len, &reg);
	sband |= (reg << 16);

	ret = xgene_cle_dram_wr(cle, &sband, 1, idx, PKT_RAM, CLE_CMD_WR);
	if (ret)
		return ret;

	/* Sideband: IPv4/RAW packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
			      hdr_len, &reg);
	sband = reg;

	/* Sideband: Ethernet II/RAW packets */
	hdr_len = (mac_hdr_len << 5);
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
			      hdr_len, &reg);
	sband |= (reg << 16);

	ret = xgene_cle_dram_wr(cle, &sband, 1, idx + 1, PKT_RAM, CLE_CMD_WR);
	if (ret)
		return ret;

	return 0;
}

static int xgene_cle_set_rss_skeys(struct xgene_enet_cle *cle)
{
	u32 secret_key_ipv4[4]; /* 16 bytes */
	int ret = 0;

	get_random_bytes(secret_key_ipv4, 16);
	ret = xgene_cle_dram_wr(cle, secret_key_ipv4, 4, 0,
				RSS_IPV4_HASH_SKEY, CLE_CMD_WR);
	return ret;
}

static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata)
{
	u32 fpsel, dstqid, nfpsel, idt_reg, idx;
	int i, ret = 0;
	u16 pool_id;

	for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) {
		idx = i % pdata->rxq_cnt;
		pool_id = pdata->rx_ring[idx]->buf_pool->id;
		fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20;
		dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]);
		nfpsel = 0;
		idt_reg = 0;

		xgene_cle_idt_to_hw(pdata, dstqid, fpsel, nfpsel, &idt_reg);
		ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i,
					RSS_IDT, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	ret = xgene_cle_set_rss_skeys(&pdata->cle);
	if (ret)
		return ret;

	return 0;
}
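
/*
 * RSS bring-up: turn on the IPv4 hash on every active parser, load the
 * sideband entries that describe where the L3/L4 headers sit for each
 * packet class, and fill the indirection table mapping hash results to
 * rx rings.  RSS_IPV4_12B presumably selects a 12-byte hash input (the
 * source/destination addresses plus the L4 ports).
 */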
static int xgene_cle_setup_rss(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *cle = &pdata->cle;
	void __iomem *base = cle->base;
	u32 offset, val = 0;
	int i, ret = 0;

	for (i = 0; i < cle->parsers; i++) {
		if (cle->active_parser != PARSER_ALL)
			offset = cle->active_parser * CLE_PORT_OFFSET;
		else
			offset = i * CLE_PORT_OFFSET;

		/* enable RSS */
		val = (RSS_IPV4_12B << 1) | 0x1;
		writel(val, base + RSS_CTRL0 + offset);
	}

	/* setup sideband data */
	ret = xgene_cle_set_rss_sband(cle);
	if (ret)
		return ret;

	/* setup indirection table */
	ret = xgene_cle_set_rss_idt(pdata);
	if (ret)
		return ret;

	return 0;
}
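
/*
 * Default parse tree wired up below: PKT_TYPE_NODE tests the ethertype
 * (the tree starts at packet offset 12) for IPv4, PKT_PROT_NODE tests
 * the IP protocol field for TCP or UDP, and the two RSS nodes step
 * across the 5-tuple with search_byte_store enabled so the hardware
 * collects the hash input bytes.  Every path ends at LAST_NODE, whose
 * branch hands off to index MAX_NODES, which is where the key node
 * lands; that key resolves to the DB_RES_ACCEPT result.
 */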
static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *enet_cle = &pdata->cle;
	struct xgene_cle_dbptr dbptr[DB_MAX_PTRS];
	struct xgene_cle_ptree_branch *br;
	u32 def_qid, def_fpsel, pool_id;
	struct xgene_cle_ptree *ptree;
	struct xgene_cle_ptree_kn kn;
	int ret;
	struct xgene_cle_ptree_ewdn ptree_dn[] = {
		{
			/* PKT_TYPE_NODE */
			.node_type = EWDN,
			.last_node = 0,
			.hdr_len_store = 1,
			.hdr_extn = NO_BYTE,
			.byte_store = NO_BYTE,
			.search_byte_store = NO_BYTE,
			.result_pointer = DB_RES_DROP,
			.num_branches = 2,
			.branch = {
				{
					/* IPV4 */
					.valid = 1,
					.next_packet_pointer = 22,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = PKT_PROT_NODE,
					.next_branch = 0,
					.data = 0x8,
					.mask = 0x0
				},
				{
					.valid = 0,
					.next_packet_pointer = 262,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = LAST_NODE,
					.next_branch = 0,
					.data = 0x0,
					.mask = 0xffff
				}
			},
		},
		{
			/* PKT_PROT_NODE */
			.node_type = EWDN,
			.last_node = 0,
			.hdr_len_store = 1,
			.hdr_extn = NO_BYTE,
			.byte_store = NO_BYTE,
			.search_byte_store = NO_BYTE,
			.result_pointer = DB_RES_DROP,
			.num_branches = 3,
			.branch = {
				{
					/* TCP */
					.valid = 1,
					.next_packet_pointer = 26,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_TCP_NODE,
					.next_branch = 0,
					.data = 0x0600,
					.mask = 0x00ff
				},
				{
					/* UDP */
					.valid = 1,
					.next_packet_pointer = 26,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_UDP_NODE,
					.next_branch = 0,
					.data = 0x1100,
					.mask = 0x00ff
				},
				{
					.valid = 0,
					.next_packet_pointer = 260,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = LAST_NODE,
					.next_branch = 0,
					.data = 0x0,
					.mask = 0xffff
				}
			}
		},
		{
			/* RSS_IPV4_TCP_NODE */
			.node_type = EWDN,
			.last_node = 0,
			.hdr_len_store = 1,
			.hdr_extn = NO_BYTE,
			.byte_store = NO_BYTE,
			.search_byte_store = BOTH_BYTES,
			.result_pointer = DB_RES_DROP,
			.num_branches = 6,
			.branch = {
				{
					/* SRC IPV4 B01 */
					.valid = 0,
					.next_packet_pointer = 28,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_TCP_NODE,
					.next_branch = 1,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* SRC IPV4 B23 */
					.valid = 0,
					.next_packet_pointer = 30,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_TCP_NODE,
					.next_branch = 2,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* DST IPV4 B01 */
					.valid = 0,
					.next_packet_pointer = 32,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_TCP_NODE,
					.next_branch = 3,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* DST IPV4 B23 */
					.valid = 0,
					.next_packet_pointer = 34,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_TCP_NODE,
					.next_branch = 4,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* TCP SRC Port */
					.valid = 0,
					.next_packet_pointer = 36,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_TCP_NODE,
					.next_branch = 5,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* TCP DST Port */
					.valid = 0,
					.next_packet_pointer = 256,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = LAST_NODE,
					.next_branch = 0,
					.data = 0x0,
					.mask = 0xffff
				}
			}
		},
		{
			/* RSS_IPV4_UDP_NODE */
			.node_type = EWDN,
			.last_node = 0,
			.hdr_len_store = 1,
			.hdr_extn = NO_BYTE,
			.byte_store = NO_BYTE,
			.search_byte_store = BOTH_BYTES,
			.result_pointer = DB_RES_DROP,
			.num_branches = 6,
			.branch = {
				{
					/* SRC IPV4 B01 */
					.valid = 0,
					.next_packet_pointer = 28,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_UDP_NODE,
					.next_branch = 1,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* SRC IPV4 B23 */
					.valid = 0,
					.next_packet_pointer = 30,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_UDP_NODE,
					.next_branch = 2,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* DST IPV4 B01 */
					.valid = 0,
					.next_packet_pointer = 32,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_UDP_NODE,
					.next_branch = 3,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* DST IPV4 B23 */
					.valid = 0,
					.next_packet_pointer = 34,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_UDP_NODE,
					.next_branch = 4,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* UDP SRC Port */
					.valid = 0,
					.next_packet_pointer = 36,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_UDP_NODE,
					.next_branch = 5,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* UDP DST Port */
					.valid = 0,
					.next_packet_pointer = 258,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = LAST_NODE,
					.next_branch = 0,
					.data = 0x0,
					.mask = 0xffff
				}
			}
		},
		{
			/* LAST NODE */
			.node_type = EWDN,
			.last_node = 1,
			.hdr_len_store = 1,
			.hdr_extn = NO_BYTE,
			.byte_store = NO_BYTE,
			.search_byte_store = NO_BYTE,
			.result_pointer = DB_RES_DROP,
			.num_branches = 1,
			.branch = {
				{
					.valid = 0,
					.next_packet_pointer = 0,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = MAX_NODES,
					.next_branch = 0,
					.data = 0,
					.mask = 0xffff
				}
			}
		}
	};

	ptree = &enet_cle->ptree;
	ptree->start_pkt = 12; /* Ethertype */
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		ret = xgene_cle_setup_rss(pdata);
		if (ret) {
			netdev_err(pdata->ndev, "RSS initialization failed\n");
			return ret;
		}
	} else {
		br = &ptree_dn[PKT_PROT_NODE].branch[0];
		br->valid = 0;
		br->next_packet_pointer = 260;
		br->next_node = LAST_NODE;
		br->data = 0x0000;
		br->mask = 0xffff;
	}
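
	/*
	 * Traffic resolves through the result database: both the accept
	 * and the default entries point at rx ring 0 and its buffer
	 * pool, the default entry is also installed as the per-port
	 * default classification at priority 7, and the drop entry
	 * simply sets the drop bit.
	 */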
	def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
	pool_id = pdata->rx_ring[0]->buf_pool->id;
	def_fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20;

	memset(dbptr, 0, sizeof(struct xgene_cle_dbptr) * DB_MAX_PTRS);
	dbptr[DB_RES_ACCEPT].fpsel = def_fpsel;
	dbptr[DB_RES_ACCEPT].dstqid = def_qid;
	dbptr[DB_RES_ACCEPT].cle_priority = 1;

	dbptr[DB_RES_DEF].fpsel = def_fpsel;
	dbptr[DB_RES_DEF].dstqid = def_qid;
	dbptr[DB_RES_DEF].cle_priority = 7;
	xgene_cle_setup_def_dbptr(pdata, enet_cle, &dbptr[DB_RES_DEF],
				  DB_RES_ACCEPT, 7);

	dbptr[DB_RES_DROP].drop = 1;

	memset(&kn, 0, sizeof(kn));
	kn.node_type = KN;
	kn.num_keys = 1;
	kn.key[0].priority = 0;
	kn.key[0].result_pointer = DB_RES_ACCEPT;

	ptree->dn = ptree_dn;
	ptree->kn = &kn;
	ptree->dbptr = dbptr;
	ptree->num_dn = MAX_NODES;
	ptree->num_kn = 1;
	ptree->num_dbptr = DB_MAX_PTRS;

	return xgene_cle_setup_ptree(pdata, enet_cle);
}

const struct xgene_cle_ops xgene_cle3in_ops = {
	.cle_init = xgene_enet_cle_init,
};