/* Applied Micro X-Gene SoC Ethernet Classifier structures
 *
 * Copyright (c) 2016, Applied Micro Circuits Corporation
 * Authors: Khuong Dinh <kdinh@apm.com>
 *          Tanmay Inamdar <tinamdar@apm.com>
 *          Iyappan Subramanian <isubramanian@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"

/* interfaces to convert structures to HW recognized bit formats */
static void xgene_cle_sband_to_hw(u8 frag, enum xgene_cle_prot_version ver,
				  enum xgene_cle_prot_type type, u32 len,
				  u32 *reg)
{
	*reg = SET_VAL(SB_IPFRAG, frag) |
	       SET_VAL(SB_IPPROT, type) |
	       SET_VAL(SB_IPVER, ver) |
	       SET_VAL(SB_HDRLEN, len);
}

static void xgene_cle_idt_to_hw(u32 dstqid, u32 fpsel,
				u32 nfpsel, u32 *idt_reg)
{
	*idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
		   SET_VAL(IDT_FPSEL, fpsel) |
		   SET_VAL(IDT_NFPSEL, nfpsel);
}

static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
				  struct xgene_cle_dbptr *dbptr, u32 *buf)
{
	buf[0] = SET_VAL(CLE_DROP, dbptr->drop);
	buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) |
		 SET_VAL(CLE_DSTQIDL, dbptr->dstqid);

	buf[5] = SET_VAL(CLE_DSTQIDH, (u32)dbptr->dstqid >> CLE_DSTQIDL_LEN) |
		 SET_VAL(CLE_PRIORITY, dbptr->cle_priority);
}

static void xgene_cle_kn_to_hw(struct xgene_cle_ptree_kn *kn, u32 *buf)
{
	u32 i, j = 0;
	u32 data;

	buf[j++] = SET_VAL(CLE_TYPE, kn->node_type);
	for (i = 0; i < kn->num_keys; i++) {
		struct xgene_cle_ptree_key *key = &kn->key[i];

		if (!(i % 2)) {
			buf[j] = SET_VAL(CLE_KN_PRIO, key->priority) |
				 SET_VAL(CLE_KN_RPTR, key->result_pointer);
		} else {
			data = SET_VAL(CLE_KN_PRIO, key->priority) |
			       SET_VAL(CLE_KN_RPTR, key->result_pointer);
			buf[j++] |= (data << 16);
		}
	}
}

static void xgene_cle_dn_to_hw(struct xgene_cle_ptree_ewdn *dn,
			       u32 *buf, u32 jb)
{
	struct xgene_cle_ptree_branch *br;
	u32 i, j = 0;
	u32 npp;

	buf[j++] = SET_VAL(CLE_DN_TYPE, dn->node_type) |
		   SET_VAL(CLE_DN_LASTN, dn->last_node) |
		   SET_VAL(CLE_DN_HLS, dn->hdr_len_store) |
		   SET_VAL(CLE_DN_EXT, dn->hdr_extn) |
		   SET_VAL(CLE_DN_BSTOR, dn->byte_store) |
		   SET_VAL(CLE_DN_SBSTOR, dn->search_byte_store) |
		   SET_VAL(CLE_DN_RPTR, dn->result_pointer);

	for (i = 0; i < dn->num_branches; i++) {
		br = &dn->branch[i];
		npp = br->next_packet_pointer;

		if ((br->jump_rel == JMP_ABS) && (npp < CLE_PKTRAM_SIZE))
			npp += jb;

		buf[j++] = SET_VAL(CLE_BR_VALID, br->valid) |
			   SET_VAL(CLE_BR_NPPTR, npp) |
			   SET_VAL(CLE_BR_JB, br->jump_bw) |
			   SET_VAL(CLE_BR_JR, br->jump_rel) |
			   SET_VAL(CLE_BR_OP, br->operation) |
			   SET_VAL(CLE_BR_NNODE, br->next_node) |
			   SET_VAL(CLE_BR_NBR, br->next_branch);

		buf[j++] = SET_VAL(CLE_BR_DATA, br->data) |
			   SET_VAL(CLE_BR_MASK, br->mask);
	}
}
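
/* The indirect command interface acks a command by setting its status bit
 * in INDCMD_STATUS; poll for that bit, sleeping 1-2 ms per attempt, and
 * give up with -EBUSY after ten tries (roughly 10-20 ms total).
 */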
static int xgene_cle_poll_cmd_done(void __iomem *base,
				   enum xgene_cle_cmd_type cmd)
{
	u32 status, loop = 10;
	int ret = -EBUSY;

	while (loop--) {
		status = ioread32(base + INDCMD_STATUS);
		if (status & cmd) {
			ret = 0;
			break;
		}
		usleep_range(1000, 2000);
	}

	return ret;
}

static int xgene_cle_dram_wr(struct xgene_enet_cle *cle, u32 *data, u8 nregs,
			     u32 index, enum xgene_cle_dram_type type,
			     enum xgene_cle_cmd_type cmd)
{
	enum xgene_cle_parser parser = cle->active_parser;
	void __iomem *base = cle->base;
	u32 i, j, ind_addr;
	u8 port, nparsers;
	int ret = 0;

	/* PTREE_RAM onwards, DRAM regions are common for all parsers */
	nparsers = (type >= PTREE_RAM) ? 1 : cle->parsers;

	for (i = 0; i < nparsers; i++) {
		port = i;
		if ((type < PTREE_RAM) && (parser != PARSER_ALL))
			port = parser;

		ind_addr = XGENE_CLE_DRAM(type + (port * 4)) | index;
		iowrite32(ind_addr, base + INDADDR);
		for (j = 0; j < nregs; j++)
			iowrite32(data[j], base + DATA_RAM0 + (j * 4));
		iowrite32(cmd, base + INDCMD);

		ret = xgene_cle_poll_cmd_done(base, cmd);
		if (ret)
			break;
	}

	return ret;
}

static void xgene_cle_enable_ptree(struct xgene_enet_pdata *pdata,
				   struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	void __iomem *addr, *base = cle->base;
	u32 offset = CLE_PORT_OFFSET;
	u32 i;

	/* 1G port has to advance 4 bytes and 10G has to advance 8 bytes */
	ptree->start_pkt += cle->jump_bytes;
	for (i = 0; i < cle->parsers; i++) {
		if (cle->active_parser != PARSER_ALL)
			addr = base + cle->active_parser * offset;
		else
			addr = base + (i * offset);

		iowrite32(ptree->start_node & 0x3fff, addr + SNPTR0);
		iowrite32(ptree->start_pkt & 0x1ff, addr + SPPTR0);
	}
}

static int xgene_cle_setup_dbptr(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	u32 buf[CLE_DRAM_REGS];
	u32 i;
	int ret;

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < ptree->num_dbptr; i++) {
		xgene_cle_dbptr_to_hw(pdata, &ptree->dbptr[i], buf);
		ret = xgene_cle_dram_wr(cle, buf, 6, i + ptree->start_dbptr,
					DB_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgene_cle_setup_node(struct xgene_enet_pdata *pdata,
				struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	struct xgene_cle_ptree_ewdn *dn = ptree->dn;
	struct xgene_cle_ptree_kn *kn = ptree->kn;
	u32 buf[CLE_DRAM_REGS];
	int i, j, ret;

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < ptree->num_dn; i++) {
		xgene_cle_dn_to_hw(&dn[i], buf, cle->jump_bytes);
		ret = xgene_cle_dram_wr(cle, buf, 17, i + ptree->start_node,
					PTREE_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	/* continue node index for key node */
	memset(buf, 0, sizeof(buf));
	for (j = i; j < (ptree->num_kn + ptree->num_dn); j++) {
		xgene_cle_kn_to_hw(&kn[j - ptree->num_dn], buf);
		ret = xgene_cle_dram_wr(cle, buf, 17, j + ptree->start_node,
					PTREE_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgene_cle_setup_ptree(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_cle *cle)
{
	int ret;

	ret = xgene_cle_setup_node(pdata, cle);
	if (ret)
		return ret;

	ret = xgene_cle_setup_dbptr(pdata, cle);
	if (ret)
		return ret;

	xgene_cle_enable_ptree(pdata, cle);

	return 0;
}
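
/* Program the default classification result for each parser port: the
 * six-word result image is written to DFCLSRESDB00 and the (priority,
 * index) selector to DFCLSRESDBPTR0, so frames that fall out of the parse
 * tree still land on a well-defined queue.
 */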
static void xgene_cle_setup_def_dbptr(struct xgene_enet_pdata *pdata,
				      struct xgene_enet_cle *enet_cle,
				      struct xgene_cle_dbptr *dbptr,
				      u32 index, u8 priority)
{
	void __iomem *base = enet_cle->base;
	void __iomem *base_addr;
	u32 buf[CLE_DRAM_REGS];
	u32 def_cls, offset;
	u32 i, j;

	memset(buf, 0, sizeof(buf));
	xgene_cle_dbptr_to_hw(pdata, dbptr, buf);

	for (i = 0; i < enet_cle->parsers; i++) {
		if (enet_cle->active_parser != PARSER_ALL) {
			offset = enet_cle->active_parser *
				CLE_PORT_OFFSET;
		} else {
			offset = i * CLE_PORT_OFFSET;
		}

		base_addr = base + DFCLSRESDB00 + offset;
		for (j = 0; j < 6; j++)
			iowrite32(buf[j], base_addr + (j * 4));

		def_cls = ((priority & 0x7) << 10) | (index & 0x3ff);
		iowrite32(def_cls, base + DFCLSRESDBPTR0 + offset);
	}
}

static int xgene_cle_set_rss_sband(struct xgene_enet_cle *cle)
{
	u32 idx = CLE_PKTRAM_SIZE / sizeof(u32);
	u32 mac_hdr_len = ETH_HLEN;
	u32 sband, reg = 0;
	u32 ipv4_ihl = 5;
	u32 hdr_len;
	int ret;

	/* Sideband: IPV4/TCP packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_TCP, hdr_len, &reg);
	sband = reg;

	/* Sideband: IPv4/UDP packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(1, XGENE_CLE_IPV4, XGENE_CLE_UDP, hdr_len, &reg);
	sband |= (reg << 16);

	ret = xgene_cle_dram_wr(cle, &sband, 1, idx, PKT_RAM, CLE_CMD_WR);
	if (ret)
		return ret;

	/* Sideband: IPv4/RAW packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
			      hdr_len, &reg);
	sband = reg;

	/* Sideband: Ethernet II/RAW packets */
	hdr_len = (mac_hdr_len << 5);
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
			      hdr_len, &reg);
	sband |= (reg << 16);

	ret = xgene_cle_dram_wr(cle, &sband, 1, idx + 1, PKT_RAM, CLE_CMD_WR);
	if (ret)
		return ret;

	return 0;
}

static int xgene_cle_set_rss_skeys(struct xgene_enet_cle *cle)
{
	u32 secret_key_ipv4[4]; /* 16 Bytes */
	int ret = 0;

	get_random_bytes(secret_key_ipv4, 16);
	ret = xgene_cle_dram_wr(cle, secret_key_ipv4, 4, 0,
				RSS_IPV4_HASH_SKEY, CLE_CMD_WR);
	return ret;
}

static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata)
{
	u32 fpsel, dstqid, nfpsel, idt_reg, idx;
	int i, ret = 0;
	u16 pool_id;

	for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) {
		idx = i % pdata->rxq_cnt;
		pool_id = pdata->rx_ring[idx]->buf_pool->id;
		fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20;
		dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]);
		nfpsel = 0;
		idt_reg = 0;

		xgene_cle_idt_to_hw(dstqid, fpsel, nfpsel, &idt_reg);
		ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i,
					RSS_IDT, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	ret = xgene_cle_set_rss_skeys(&pdata->cle);
	if (ret)
		return ret;

	return 0;
}
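
/* Per-port RSS enable lives in RSS_CTRL0: bit 0 is the enable and the
 * field starting at bit 1 selects the hash input. RSS_IPV4_12B presumably
 * names the 12-byte IPv4 tuple (source/destination address plus L4 ports)
 * that the search_byte_store branches of the RSS tree nodes collect.
 */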
static int xgene_cle_setup_rss(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *cle = &pdata->cle;
	void __iomem *base = cle->base;
	u32 offset, val = 0;
	int i, ret = 0;

	offset = CLE_PORT_OFFSET;
	for (i = 0; i < cle->parsers; i++) {
		if (cle->active_parser != PARSER_ALL)
			offset = cle->active_parser * CLE_PORT_OFFSET;
		else
			offset = i * CLE_PORT_OFFSET;

		/* enable RSS */
		val = (RSS_IPV4_12B << 1) | 0x1;
		writel(val, base + RSS_CTRL0 + offset);
	}

	/* setup sideband data */
	ret = xgene_cle_set_rss_sband(cle);
	if (ret)
		return ret;

	/* setup indirection table */
	ret = xgene_cle_set_rss_idt(pdata);
	if (ret)
		return ret;

	return 0;
}
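
/* Default parse tree: PKT_TYPE_NODE matches the EtherType for IPv4,
 * PKT_PROT_NODE dispatches on the IP protocol byte (0x06 TCP, 0x11 UDP),
 * the two RSS nodes step next_packet_pointer across the source and
 * destination addresses and L4 ports to collect hash bytes, and LAST_NODE
 * terminates everything else with the default result.
 */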
static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *enet_cle = &pdata->cle;
	struct xgene_cle_dbptr dbptr[DB_MAX_PTRS];
	struct xgene_cle_ptree_branch *br;
	u32 def_qid, def_fpsel, pool_id;
	struct xgene_cle_ptree *ptree;
	struct xgene_cle_ptree_kn kn;
	int ret;
	struct xgene_cle_ptree_ewdn ptree_dn[] = {
		{
			/* PKT_TYPE_NODE */
			.node_type = EWDN,
			.last_node = 0,
			.hdr_len_store = 1,
			.hdr_extn = NO_BYTE,
			.byte_store = NO_BYTE,
			.search_byte_store = NO_BYTE,
			.result_pointer = DB_RES_DROP,
			.num_branches = 2,
			.branch = {
				{
					/* IPV4 */
					.valid = 1,
					.next_packet_pointer = 22,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = PKT_PROT_NODE,
					.next_branch = 0,
					.data = 0x8,
					.mask = 0x0
				},
				{
					.valid = 0,
					.next_packet_pointer = 262,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = LAST_NODE,
					.next_branch = 0,
					.data = 0x0,
					.mask = 0xffff
				}
			},
		},
		{
			/* PKT_PROT_NODE */
			.node_type = EWDN,
			.last_node = 0,
			.hdr_len_store = 1,
			.hdr_extn = NO_BYTE,
			.byte_store = NO_BYTE,
			.search_byte_store = NO_BYTE,
			.result_pointer = DB_RES_DROP,
			.num_branches = 3,
			.branch = {
				{
					/* TCP */
					.valid = 1,
					.next_packet_pointer = 26,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_TCP_NODE,
					.next_branch = 0,
					.data = 0x0600,
					.mask = 0x00ff
				},
				{
					/* UDP */
					.valid = 1,
					.next_packet_pointer = 26,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_UDP_NODE,
					.next_branch = 0,
					.data = 0x1100,
					.mask = 0x00ff
				},
				{
					.valid = 0,
					.next_packet_pointer = 260,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = LAST_NODE,
					.next_branch = 0,
					.data = 0x0,
					.mask = 0xffff
				}
			}
		},
		{
			/* RSS_IPV4_TCP_NODE */
			.node_type = EWDN,
			.last_node = 0,
			.hdr_len_store = 1,
			.hdr_extn = NO_BYTE,
			.byte_store = NO_BYTE,
			.search_byte_store = BOTH_BYTES,
			.result_pointer = DB_RES_DROP,
			.num_branches = 6,
			.branch = {
				{
					/* SRC IPV4 B01 */
					.valid = 0,
					.next_packet_pointer = 28,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_TCP_NODE,
					.next_branch = 1,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* SRC IPV4 B23 */
					.valid = 0,
					.next_packet_pointer = 30,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_TCP_NODE,
					.next_branch = 2,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* DST IPV4 B01 */
					.valid = 0,
					.next_packet_pointer = 32,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_TCP_NODE,
					.next_branch = 3,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* DST IPV4 B23 */
					.valid = 0,
					.next_packet_pointer = 34,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_TCP_NODE,
					.next_branch = 4,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* TCP SRC Port */
					.valid = 0,
					.next_packet_pointer = 36,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_TCP_NODE,
					.next_branch = 5,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* TCP DST Port */
					.valid = 0,
					.next_packet_pointer = 256,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = LAST_NODE,
					.next_branch = 0,
					.data = 0x0,
					.mask = 0xffff
				}
			}
		},
		{
			/* RSS_IPV4_UDP_NODE */
			.node_type = EWDN,
			.last_node = 0,
			.hdr_len_store = 1,
			.hdr_extn = NO_BYTE,
			.byte_store = NO_BYTE,
			.search_byte_store = BOTH_BYTES,
			.result_pointer = DB_RES_DROP,
			.num_branches = 6,
			.branch = {
				{
					/* SRC IPV4 B01 */
					.valid = 0,
					.next_packet_pointer = 28,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_UDP_NODE,
					.next_branch = 1,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* SRC IPV4 B23 */
					.valid = 0,
					.next_packet_pointer = 30,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_UDP_NODE,
					.next_branch = 2,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* DST IPV4 B01 */
					.valid = 0,
					.next_packet_pointer = 32,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_UDP_NODE,
					.next_branch = 3,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* DST IPV4 B23 */
					.valid = 0,
					.next_packet_pointer = 34,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_UDP_NODE,
					.next_branch = 4,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* UDP SRC Port */
					.valid = 0,
					.next_packet_pointer = 36,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_UDP_NODE,
					.next_branch = 5,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* UDP DST Port */
					.valid = 0,
					.next_packet_pointer = 258,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = LAST_NODE,
					.next_branch = 0,
					.data = 0x0,
					.mask = 0xffff
				}
			}
		},
		{
			/* LAST NODE */
			.node_type = EWDN,
			.last_node = 1,
			.hdr_len_store = 1,
			.hdr_extn = NO_BYTE,
			.byte_store = NO_BYTE,
			.search_byte_store = NO_BYTE,
			.result_pointer = DB_RES_DROP,
			.num_branches = 1,
			.branch = {
				{
					.valid = 0,
					.next_packet_pointer = 0,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = MAX_NODES,
					.next_branch = 0,
					.data = 0,
					.mask = 0xffff
				}
			}
		}
	};

	ptree = &enet_cle->ptree;
	ptree->start_pkt = 12; /* Ethertype */
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		ret = xgene_cle_setup_rss(pdata);
		if (ret) {
			netdev_err(pdata->ndev, "RSS initialization failed\n");
			return ret;
		}
	} else {
		br = &ptree_dn[PKT_PROT_NODE].branch[0];
		br->valid = 0;
		br->next_packet_pointer = 260;
		br->next_node = LAST_NODE;
		br->data = 0x0000;
		br->mask = 0xffff;
	}

	def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
	pool_id = pdata->rx_ring[0]->buf_pool->id;
	def_fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20;

	memset(dbptr, 0, sizeof(struct xgene_cle_dbptr) * DB_MAX_PTRS);
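
	/* Three canned results back the tree: DB_RES_ACCEPT steers matched
	 * frames to the default ring at cle_priority 1, DB_RES_DEF backs the
	 * per-port default written by xgene_cle_setup_def_dbptr() at
	 * cle_priority 7, and DB_RES_DROP discards the frame.
	 */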
	dbptr[DB_RES_ACCEPT].fpsel = def_fpsel;
	dbptr[DB_RES_ACCEPT].dstqid = def_qid;
	dbptr[DB_RES_ACCEPT].cle_priority = 1;

	dbptr[DB_RES_DEF].fpsel = def_fpsel;
	dbptr[DB_RES_DEF].dstqid = def_qid;
	dbptr[DB_RES_DEF].cle_priority = 7;
	xgene_cle_setup_def_dbptr(pdata, enet_cle, &dbptr[DB_RES_DEF],
				  DB_RES_ACCEPT, 7);

	dbptr[DB_RES_DROP].drop = 1;

	memset(&kn, 0, sizeof(kn));
	kn.node_type = KN;
	kn.num_keys = 1;
	kn.key[0].priority = 0;
	kn.key[0].result_pointer = DB_RES_ACCEPT;

	ptree->dn = ptree_dn;
	ptree->kn = &kn;
	ptree->dbptr = dbptr;
	ptree->num_dn = MAX_NODES;
	ptree->num_kn = 1;
	ptree->num_dbptr = DB_MAX_PTRS;

	return xgene_cle_setup_ptree(pdata, enet_cle);
}

const struct xgene_cle_ops xgene_cle3in_ops = {
	.cle_init = xgene_enet_cle_init,
};