// SPDX-License-Identifier: GPL-2.0
/*
 * Header Parser helpers for Marvell PPv2 Network Controller
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);

	return 0;
}

/* Initialize tcam entry from hw */
int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
			   int tid)
{
	int i;

	if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	memset(pe, 0, sizeof(*pe));
	pe->index = tid;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK);
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK);
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
}
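/*
 * Illustrative sketch (not part of the driver): the read-modify-write
 * cycle the helpers above are combined into throughout this file. "tid"
 * is a hypothetical TCAM index owned by the caller; error handling is
 * elided.
 *
 *	struct mvpp2_prs_entry pe;
 *
 *	if (!mvpp2_prs_init_from_hw(priv, &pe, tid)) {
 *		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
 *		mvpp2_prs_hw_write(priv, &pe);
 *	}
 *
 * Note that mvpp2_prs_hw_write() clears the invalidation bit, so writing
 * an entry back also re-validates it.
 */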
/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	if (add)
		pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port));
	else
		pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port));
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK);
	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK);
	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK);
}

/* Obtain port map from tcam sw entry */
unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK;
}

/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;

	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos);
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos);
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos;
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos);
}

/* Get byte of data and its enable bits from tcam sw entry */
void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
				  unsigned int offs, unsigned char *byte,
				  unsigned char *enable)
{
	int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;

	*byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff;
	*enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff;
}

/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	u16 tcam_data;

	tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff;
	return tcam_data == data;
}

/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i);
		else
			pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i);
	}

	pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable);
}

/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}

/* Set vid in tcam sw entry */
static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
				unsigned short vid)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
}
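/*
 * Illustrative note: in each TCAM data word, the match byte for offset
 * "offs" sits at bit position pos and its enable bits at pos + 16, which
 * is the layout the _byte_set()/_byte_get() pair above encodes. A
 * hypothetical exact match on ETH_P_IP at the start of the lookup window,
 *
 *	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
 *
 * therefore expands into two fully-enabled byte matches: 0x08 at offset 0
 * and 0x00 at offset 1.
 */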
/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    u32 val)
{
	pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num)));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      u32 val)
{
	pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num)));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
						1);
		else
			mvpp2_prs_sram_bits_clear(pe,
						  MVPP2_PRS_SRAM_RI_OFFS + i,
						  1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
}

/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
						1);
		else
			mvpp2_prs_sram_bits_clear(pe,
						  MVPP2_PRS_SRAM_AI_OFFS + i,
						  1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	/* ai is stored on bits 90..97, so it spans two u32 words */
	int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);

	bits = (pe->sram[ai_off] >> ai_shift) |
	       (pe->sram[ai_off + 1] << (32 - ai_shift));

	return bits;
}

/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] = shift & MVPP2_PRS_SRAM_SHIFT_MASK;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
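/*
 * Illustrative note: the shift is stored as sign + magnitude, not two's
 * complement. A hypothetical entry that rewinds the parse position by 18
 * bytes,
 *
 *	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
 *
 * sets MVPP2_PRS_SRAM_SHIFT_SIGN_BIT and programs a magnitude of 18.
 */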
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				offset & MVPP2_PRS_SRAM_UDF_MASK);

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* Find parser flow entry */
static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);
		bits = mvpp2_prs_sram_ai_get(&pe);

		/* Sram stores the classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return tid;
	}

	return -ENOENT;
}

/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}

/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
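/*
 * Illustrative note: the TCAM port enables are stored inverted, so
 * mvpp2_prs_tcam_port_map_set(&pe, 0) masks every port and the entry
 * matches nothing until mvpp2_prs_tcam_port_set(&pe, port, true) opens it
 * up for a given port. The placeholder entries created in this file rely
 * on exactly that: they start with an empty port map and are then enabled
 * per port.
 */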
/* Set port to unicast or multicast promiscuous mode */
void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
			       enum mvpp2_prs_l2_cast l2_cast, bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char cast_match;
	unsigned int ri;
	int tid;

	if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
		cast_match = MVPP2_PRS_UCAST_VAL;
		tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_UCAST;
	} else {
		cast_match = MVPP2_PRS_MCAST_VAL;
		tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_MCAST;
	}

	/* promiscuous mode - Accept unknown unicast or multicast packets */
	if (priv->prs_shadow[tid].valid) {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = tid;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);

		/* Match UC or MC addresses */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
					     MVPP2_PRS_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);

			/* Set ai bits for next iteration */
			if (extend)
				mvpp2_prs_sram_ai_update(&pe, 1,
							 MVPP2_PRS_SRAM_AI_MASK);
			else
				mvpp2_prs_sram_ai_update(&pe, 0,
							 MVPP2_PRS_SRAM_AI_MASK);

			/* Set result info bits to 'single vlan' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
			/* If packet is tagged continue check vid filtering */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
		} else {
			/* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag */
			mvpp2_prs_sram_shift_set(&pe, shift,
						 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
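/*
 * Illustrative sketch (hypothetical caller): enabling reception of both
 * tagged and untagged regular DSA frames on port 1 is two updates of the
 * shared placeholder entries above:
 *
 *	mvpp2_prs_dsa_tag_set(priv, 1, true, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
 *	mvpp2_prs_dsa_tag_set(priv, 1, true, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
 */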
/* Set entry for dsa ethertype */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
			       MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
			       MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 byte reserved + tag */
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						     MVPP2_ETH_TYPE_LEN + 2 + 3,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Search for existing single/triple vlan entry */
static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);
		match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(&pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(&pe);
		/* Clear double vlan bit */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return tid;
	}

	return -ENOENT;
}
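/*
 * Illustrative note: a lookup such as
 *
 *	tid = mvpp2_prs_vlan_find(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI);
 *
 * (hypothetical caller) matches the TPID against the first two TCAM data
 * bytes and compares the ai bits with MVPP2_PRS_DBL_VLAN_AI_BIT masked
 * out, so the single and triple tagged variants of one TPID are found by
 * the same scan and told apart by the RI vlan field.
 */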
/* Add/update single/triple vlan entry */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry pe;
	int tid_aux, tid;
	int ret = 0;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_vlan_find(priv, tpid, ai);

	if (tid < 0) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		if (tid <= tid_aux)
			return -EINVAL;

		memset(&pe, 0, sizeof(pe));
		pe.index = tid;
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);

		mvpp2_prs_match_etype(&pe, 0, tpid);

		/* VLAN tag detected, proceed with VID filtering */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}
	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);

	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}

/* Get first free double vlan ai number */
static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
{
	int i;

	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
		if (!priv->prs_double_vlans[i])
			return i;
	}

	return -EINVAL;
}

/* Search for existing double vlan entry */
static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
				      unsigned short tpid2)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
			mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);

		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return tid;
	}

	return -ENOENT;
}
/* Add or update double vlan entry */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	int tid_aux, tid, ai, ret = 0;
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (tid < 0) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0)
			return ai;

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		if (tid >= tid_aux)
			return -ERANGE;

		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		pe.index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(&pe, 0, tpid1);
		mvpp2_prs_match_etype(&pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		/* Shift 4 bytes - skip outer vlan tag */
		mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);
	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}

/* IPv4 header parsing for fragmentation and L4 offset */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Non-fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	/* Clear ri before updating */
	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
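/*
 * Illustrative note: mvpp2_prs_ip4_proto() above installs two entries per
 * protocol. The first requires the IPv4 flags/fragment-offset bytes (TCAM
 * data bytes 2 and 3 at the current parse position) to be zero, i.e. a
 * non-fragmented packet; the second reuses the same sw entry but clears
 * those byte enables and adds MVPP2_PRS_RI_IP_FRAG_TRUE, so a fragment
 * of, say, IPPROTO_UDP still carries the right L4 result info.
 */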
/* IPv4 L3 multicast or broadcast */
static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
					     MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case MVPP2_PRS_L3_BROAD_CAST:
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Set entries for protocols over IPv6 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 6,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Write HW */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
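/*
 * Illustrative note: in mvpp2_prs_ip6_proto() above, the protocol is
 * matched at TCAM data byte 0 because the parse position sits on the
 * IPv6 next-header field when the LU_IP6 lookup runs; the L4 offset of
 * sizeof(struct ipv6hdr) - 6 is likewise relative to that position
 * rather than to the start of the frame.
 */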
/* IPv6 L3 multicast entry */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Continue - set next lookup to IPv6 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				     MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}

/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}

/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}
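/*
 * Illustrative sketch (hypothetical values): the per-port defaults used
 * at probe time would start every port at the Marvell Header lookup with
 * a zero initial offset, e.g.:
 *
 *	mvpp2_prs_hw_port_init(priv, port, MVPP2_PRS_LU_MH,
 *			       MVPP2_PRS_PORT_LU_MAX, 0);
 *
 * where MVPP2_PRS_PORT_LU_MAX is assumed to be the loop budget defined
 * alongside the other parser constants.
 */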
/* Set default entries (placeholders) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* Create dummy entries for drop all and promiscuous modes */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
}

/* Set default entries for various types of dsa packets */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* Untagged EDSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* Untagged DSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Untagged EDSA ethertype entry - placeholder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - placeholder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* Untagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}
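/*
 * Illustrative note: of the entries created by mvpp2_prs_dsa_init()
 * above, only the non-extended DSA ethertype ones are installed with
 * "add" set (for port 0); the rest are placeholders with an empty port
 * map that mvpp2_prs_dsa_tag_set()/mvpp2_prs_dsa_tag_ethertype_set()
 * open up per port later.
 */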
/* Initialize parser entries for VID filtering */
static void mvpp2_prs_vid_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 4 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vid entry for extended DSA */
	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
				 MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}
/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
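/*
 * Illustrative note: mvpp2_prs_etype_init() builds the "IPv4 with
 * options" entry by reusing the sw entry of the exact-IHL one without a
 * fresh memset: only the index, the version-byte match and the cleared
 * RI words change, so both entries stay identical in shift, L3 offset
 * and next lookup.
 */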
/* Configure vlan entries and detect up to 2 successive VLAN tags.
 * Possible options:
 * 0x8100, 0x88A8
 * 0x8100, 0x8100
 * 0x8100
 * 0x88A8
 */
static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
					      MVPP2_PRS_DBL_VLANS_MAX,
					      GFP_KERNEL);
	if (!priv->prs_double_vlans)
		return -ENOMEM;

	/* Double VLAN: 0x8100, 0x88A8 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Double VLAN: 0x8100, 0x8100 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x88a8 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x8100 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Set default double vlan entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Clear ai for next iterations */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
				 MVPP2_PRS_RI_VLAN_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
				 MVPP2_PRS_DBL_VLAN_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vlan none entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
				 MVPP2_PRS_RI_VLAN_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Set entries for PPPoE ethertype */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
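/*
 * Illustrative note: mvpp2_prs_pppoe_init() mirrors the L2 ethertype
 * table for the PPP protocol space, matching PPP_IP/PPP_IPV6 from
 * <uapi/linux/ppp_defs.h> at the offset reached after the
 * MVPP2_PPPOE_HDR_SIZE shift programmed by the ETH_P_PPP_SES entry.
 */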
/* Initialize entries for IPv4 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
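/*
 * Illustrative note: the MVPP2_PRS_IPV4_DIP_AI_BIT handshake above makes
 * the LU_IP4 stage two-pass: protocol entries match with the bit clear
 * and set it in SRAM, so the address entries (unicast default, multicast,
 * broadcast) only match on the second pass, once the bit is already set.
 */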
/* Initialize entries for IPv6 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 as the last header: a case similar to TCP (6) or UDP (17) */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relative to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

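/*
 * Note on the L4 offset arithmetic above: sizeof(struct ipv6hdr) is the
 * fixed 40-byte IPv6 header.  The "- 4" appears to assume the parser
 * anchor has already advanced 4 bytes into the header by the time the
 * IP6 lookup runs (see the "skip eth_type + 4 bytes" shift in the PPPoE
 * entries), so the L4 header starts 40 - 4 = 36 bytes past the current
 * position.
 */
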
/* Find tcam entry with matched pair <vid,port> */
static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
				    u16 mask)
{
	unsigned char byte[2], enable[2];
	struct mvpp2_prs_entry pe;
	u16 rvid, rmask;
	int tid;

	/* Go through all entries with MVPP2_PRS_LU_VID */
	for (tid = MVPP2_PE_VID_FILT_RANGE_START;
	     tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) {
		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
		mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);

		rvid = ((byte[0] & 0xf) << 8) + byte[1];
		rmask = ((enable[0] & 0xf) << 8) + enable[1];

		if (rvid != vid || rmask != mask)
			continue;

		return tid;
	}

	return -ENOENT;
}

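/*
 * The 12-bit VID is split across two TCAM data bytes, mirroring the
 * layout written by mvpp2_prs_match_vid(): bits 11:8 live in the low
 * nibble of the first byte, bits 7:0 in the second.  A hypothetical
 * helper (illustration only, not part of the driver):
 *
 *   static u16 example_vid_from_bytes(u8 hi, u8 lo)
 *   {
 *           return ((hi & 0xf) << 8) | lo;
 *   }
 */
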
/* Write parser entry for VID filtering */
int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
{
	unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
				 port->id * MVPP2_PRS_VLAN_FILT_MAX;
	unsigned int mask = 0xfff, reg_val, shift;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Scan TCAM and see if entry with this <vid,port> already exists */
	tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);

	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	/* No such entry */
	if (tid < 0) {
		/* Go through all entries from first to last in vlan range */
		tid = mvpp2_prs_tcam_first_free(priv, vid_start,
						vid_start +
						MVPP2_PRS_VLAN_FILT_MAX_ENTRY);

		/* There isn't room for a new VID filter */
		if (tid < 0)
			return tid;

		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Enable the current port */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Set match on VID */
	mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Remove parser entry for VID filtering */
void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
{
	struct mvpp2 *priv = port->priv;
	int tid;

	/* Scan TCAM and see if entry with this <vid,port> already exists */
	tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);

	/* No such entry */
	if (tid < 0)
		return;

	mvpp2_prs_hw_inv(priv, tid);
	priv->prs_shadow[tid].valid = false;
}

/* Remove all existing VID filters on this port */
void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	int tid;

	/* tid is a TCAM index, not a VID, so invalidate entries directly
	 * rather than going through mvpp2_prs_vid_entry_remove()
	 */
	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
		if (priv->prs_shadow[tid].valid) {
			mvpp2_prs_hw_inv(priv, tid);
			priv->prs_shadow[tid].valid = false;
		}
	}
}

/* Remove VID filtering entry for this port */
void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;

	/* Invalidate the guard entry */
	mvpp2_prs_hw_inv(priv, tid);

	priv->prs_shadow[tid].valid = false;
}

/* Add guard entry that drops packets when no VID is matched on this port */
void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;
	unsigned int reg_val, shift;
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[tid].valid)
		return;

	memset(&pe, 0, sizeof(pe));

	pe.index = tid;

	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, 0);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Drop VLAN packets that don't belong to any VIDs on this port */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}

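/*
 * Typical use of the VID filtering API from a net_device VLAN callback
 * (hypothetical caller shown for illustration; the real hooks live
 * elsewhere in the driver):
 *
 *   static int example_vlan_rx_add_vid(struct net_device *dev,
 *                                      __be16 proto, u16 vid)
 *   {
 *           struct mvpp2_port *port = netdev_priv(dev);
 *
 *           mvpp2_prs_vid_enable_filtering(port);
 *           return mvpp2_prs_vid_entry_add(port, vid);
 *   }
 */
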
/* Parser default initialization */
int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(*priv->prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	mvpp2_prs_vid_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}

/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}

/* Find tcam entry with matched pair <MAC DA, port> */
static int
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_MAC_RANGE_START;
	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);
		entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);

		if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
		    entry_pmap == pmap)
			return tid;
	}

	return -ENOENT;
}

/* Update parser's mac da entry */
int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
{
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct mvpp2 *priv = port->priv;
	unsigned int pmap, len, ri;
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
	tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
					  MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (tid < 0) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Go through all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv,
						MVPP2_PE_MAC_RANGE_START,
						MVPP2_PE_MAC_RANGE_END);
		if (tid < 0)
			return tid;

		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(&pe);
	if (pmap == 0) {
		if (add)
			return -EINVAL;

		mvpp2_prs_hw_inv(priv, pe.index);
		priv->prs_shadow[pe.index].valid = false;
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_BCAST;
	} else if (is_multicast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_MCAST;
	} else {
		ri = MVPP2_PRS_RI_L2_UCAST;

		if (ether_addr_equal(da, port->dev->dev_addr))
			ri |= MVPP2_PRS_RI_MAC_ME_MASK;
	}

	mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Replace the parser's MAC DA entry and update the device address */
int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	ether_addr_copy(dev->dev_addr, da);

	return 0;
}

/* Remove all MAC DA entries active on this port, except broadcast and
 * the device's own address
 */
void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	unsigned long pmap;
	int index, tid;

	for (tid = MVPP2_PE_MAC_RANGE_START;
	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		pmap = mvpp2_prs_tcam_port_map_get(&pe);

		/* We only want entries active on this port */
		if (!test_bit(port->id, &pmap))
			continue;

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		/* Special cases: don't remove broadcast and port's own
		 * address
		 */
		if (is_broadcast_ether_addr(da) ||
		    ether_addr_equal(da, port->dev->dev_addr))
			continue;

		/* Remove entry from TCAM */
		mvpp2_prs_mac_da_accept(port, da, false);
	}
}

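/*
 * mvpp2_prs_update_mac_da() is the parser-side half of a MAC address
 * change; a minimal ndo_set_mac_address-style caller might look like
 * this (hypothetical, for illustration only):
 *
 *   static int example_set_mac_address(struct net_device *dev, void *p)
 *   {
 *           const struct sockaddr *addr = p;
 *
 *           if (!is_valid_ether_addr(addr->sa_data))
 *                   return -EADDRNOTAVAIL;
 *
 *           return mvpp2_prs_update_mac_da(dev, addr->sa_data);
 *   }
 */
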
/* Set the DSA tagging mode for the given port */
int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		break;

	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* Remove port from EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	default:
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}

/* Add a flow entry that matches the given result info bits */
int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
{
	struct mvpp2_prs_entry pe;
	u8 *ri_byte, *ri_byte_mask;
	int tid, i;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_tcam_first_free(priv,
					MVPP2_PE_LAST_FREE_TID,
					MVPP2_PE_FIRST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	ri_byte = (u8 *)&ri;
	ri_byte_mask = (u8 *)&ri_mask;

	mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

	for (i = 0; i < 4; i++) {
		mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i],
					     ri_byte_mask[i]);
	}

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Set prs flow for the port */
int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_flow_find(port->priv, port->id);

	/* No such entry */
	if (tid < 0) {
		/* Go through all entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe.index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
	} else {
		mvpp2_prs_init_from_hw(port->priv, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, &pe);

	return 0;
}

/* Read the hit counter of the given tcam entry */
int mvpp2_prs_hits(struct mvpp2 *priv, int index)
{
	u32 val;

	if (index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);

	val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);

	val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;

	return val;
}

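/*
 * mvpp2_prs_hits() reads the per-entry TCAM hit counter, e.g. for
 * debugfs-style instrumentation (hypothetical snippet, illustration
 * only):
 *
 *   int hits = mvpp2_prs_hits(priv, tid);
 *
 *   if (hits >= 0)
 *           pr_info("prs entry %d: %d hits\n", tid, hits);
 */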