1 /* 2 * Broadcom Starfighter 2 DSA switch CFP support 3 * 4 * Copyright (C) 2016, Broadcom 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 */ 11 12 #include <linux/list.h> 13 #include <linux/ethtool.h> 14 #include <linux/if_ether.h> 15 #include <linux/in.h> 16 #include <linux/netdevice.h> 17 #include <net/dsa.h> 18 #include <linux/bitmap.h> 19 20 #include "bcm_sf2.h" 21 #include "bcm_sf2_regs.h" 22 23 struct cfp_udf_slice_layout { 24 u8 slices[UDFS_PER_SLICE]; 25 u32 mask_value; 26 u32 base_offset; 27 }; 28 29 struct cfp_udf_layout { 30 struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES]; 31 }; 32 33 static const u8 zero_slice[UDFS_PER_SLICE] = { }; 34 35 /* UDF slices layout for a TCPv4/UDPv4 specification */ 36 static const struct cfp_udf_layout udf_tcpip4_layout = { 37 .udfs = { 38 [1] = { 39 .slices = { 40 /* End of L2, byte offset 12, src IP[0:15] */ 41 CFG_UDF_EOL2 | 6, 42 /* End of L2, byte offset 14, src IP[16:31] */ 43 CFG_UDF_EOL2 | 7, 44 /* End of L2, byte offset 16, dst IP[0:15] */ 45 CFG_UDF_EOL2 | 8, 46 /* End of L2, byte offset 18, dst IP[16:31] */ 47 CFG_UDF_EOL2 | 9, 48 /* End of L3, byte offset 0, src port */ 49 CFG_UDF_EOL3 | 0, 50 /* End of L3, byte offset 2, dst port */ 51 CFG_UDF_EOL3 | 1, 52 0, 0, 0 53 }, 54 .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG, 55 .base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET, 56 }, 57 }, 58 }; 59 60 /* UDF slices layout for a TCPv6/UDPv6 specification */ 61 static const struct cfp_udf_layout udf_tcpip6_layout = { 62 .udfs = { 63 [0] = { 64 .slices = { 65 /* End of L2, byte offset 8, src IP[0:15] */ 66 CFG_UDF_EOL2 | 4, 67 /* End of L2, byte offset 10, src IP[16:31] */ 68 CFG_UDF_EOL2 | 5, 69 /* End of L2, byte offset 12, src IP[32:47] */ 70 CFG_UDF_EOL2 | 6, 71 /* End of L2, byte offset 14, src IP[48:63] */ 72 CFG_UDF_EOL2 | 7, 73 /* End of L2, byte offset 16, src IP[64:79] */ 74 CFG_UDF_EOL2 | 8, 75 /* End of L2, byte offset 18, src IP[80:95] */ 76 CFG_UDF_EOL2 | 9, 77 /* End of L2, byte offset 20, src IP[96:111] */ 78 CFG_UDF_EOL2 | 10, 79 /* End of L2, byte offset 22, src IP[112:127] */ 80 CFG_UDF_EOL2 | 11, 81 /* End of L3, byte offset 0, src port */ 82 CFG_UDF_EOL3 | 0, 83 }, 84 .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG, 85 .base_offset = CORE_UDF_0_B_0_8_PORT_0, 86 }, 87 [3] = { 88 .slices = { 89 /* End of L2, byte offset 24, dst IP[0:15] */ 90 CFG_UDF_EOL2 | 12, 91 /* End of L2, byte offset 26, dst IP[16:31] */ 92 CFG_UDF_EOL2 | 13, 93 /* End of L2, byte offset 28, dst IP[32:47] */ 94 CFG_UDF_EOL2 | 14, 95 /* End of L2, byte offset 30, dst IP[48:63] */ 96 CFG_UDF_EOL2 | 15, 97 /* End of L2, byte offset 32, dst IP[64:79] */ 98 CFG_UDF_EOL2 | 16, 99 /* End of L2, byte offset 34, dst IP[80:95] */ 100 CFG_UDF_EOL2 | 17, 101 /* End of L2, byte offset 36, dst IP[96:111] */ 102 CFG_UDF_EOL2 | 18, 103 /* End of L2, byte offset 38, dst IP[112:127] */ 104 CFG_UDF_EOL2 | 19, 105 /* End of L3, byte offset 2, dst port */ 106 CFG_UDF_EOL3 | 1, 107 }, 108 .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG, 109 .base_offset = CORE_UDF_0_D_0_11_PORT_0, 110 }, 111 }, 112 }; 113 114 static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout) 115 { 116 unsigned int i, count = 0; 117 118 for (i = 0; i < UDFS_PER_SLICE; i++) { 119 if (layout[i] != 0) 120 count++; 121 } 122 123 return count; 124 } 125 126 static inline u32 udf_upper_bits(unsigned int num_udf) 127 { 128 return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1); 129 } 130 131 static inline u32 udf_lower_bits(unsigned int num_udf) 132 { 133 return (u8)GENMASK(num_udf - 1, 0); 134 } 135 136 static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l, 137 unsigned int start) 138 { 139 const struct cfp_udf_slice_layout *slice_layout; 140 unsigned int slice_idx; 141 142 for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) { 143 slice_layout = &l->udfs[slice_idx]; 144 if (memcmp(slice_layout->slices, zero_slice, 145 sizeof(zero_slice))) 146 break; 147 } 148 149 return slice_idx; 150 } 151 152 static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv, 153 const struct cfp_udf_layout *layout, 154 unsigned int slice_num) 155 { 156 u32 offset = layout->udfs[slice_num].base_offset; 157 unsigned int i; 158 159 for (i = 0; i < UDFS_PER_SLICE; i++) 160 core_writel(priv, layout->udfs[slice_num].slices[i], 161 offset + i * 4); 162 } 163 164 static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op) 165 { 166 unsigned int timeout = 1000; 167 u32 reg; 168 169 reg = core_readl(priv, CORE_CFP_ACC); 170 reg &= ~(OP_SEL_MASK | RAM_SEL_MASK); 171 reg |= OP_STR_DONE | op; 172 core_writel(priv, reg, CORE_CFP_ACC); 173 174 do { 175 reg = core_readl(priv, CORE_CFP_ACC); 176 if (!(reg & OP_STR_DONE)) 177 break; 178 179 cpu_relax(); 180 } while (timeout--); 181 182 if (!timeout) 183 return -ETIMEDOUT; 184 185 return 0; 186 } 187 188 static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv, 189 unsigned int addr) 190 { 191 u32 reg; 192 193 WARN_ON(addr >= priv->num_cfp_rules); 194 195 reg = core_readl(priv, CORE_CFP_ACC); 196 reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT); 197 reg |= addr << XCESS_ADDR_SHIFT; 198 core_writel(priv, reg, CORE_CFP_ACC); 199 } 200 201 static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv) 202 { 203 /* Entry #0 is reserved */ 204 return priv->num_cfp_rules - 1; 205 } 206 207 static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv, 208 unsigned int rule_index, 209 unsigned int port_num, 210 unsigned int queue_num, 211 bool fwd_map_change) 212 { 213 int ret; 214 u32 reg; 215 216 /* Replace ARL derived destination with DST_MAP derived, define 217 * which port and queue this should be forwarded to. 218 */ 219 if (fwd_map_change) 220 reg = CHANGE_FWRD_MAP_IB_REP_ARL | 221 BIT(port_num + DST_MAP_IB_SHIFT) | 222 CHANGE_TC | queue_num << NEW_TC_SHIFT; 223 else 224 reg = 0; 225 226 core_writel(priv, reg, CORE_ACT_POL_DATA0); 227 228 /* Set classification ID that needs to be put in Broadcom tag */ 229 core_writel(priv, rule_index << CHAIN_ID_SHIFT, CORE_ACT_POL_DATA1); 230 231 core_writel(priv, 0, CORE_ACT_POL_DATA2); 232 233 /* Configure policer RAM now */ 234 ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM); 235 if (ret) { 236 pr_err("Policer entry at %d failed\n", rule_index); 237 return ret; 238 } 239 240 /* Disable the policer */ 241 core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0); 242 243 /* Now the rate meter */ 244 ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM); 245 if (ret) { 246 pr_err("Meter entry at %d failed\n", rule_index); 247 return ret; 248 } 249 250 return 0; 251 } 252 253 static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv, 254 struct ethtool_tcpip4_spec *v4_spec, 255 unsigned int slice_num, 256 bool mask) 257 { 258 u32 reg, offset; 259 260 /* C-Tag [31:24] 261 * UDF_n_A8 [23:8] 262 * UDF_n_A7 [7:0] 263 */ 264 reg = 0; 265 if (mask) 266 offset = CORE_CFP_MASK_PORT(4); 267 else 268 offset = CORE_CFP_DATA_PORT(4); 269 core_writel(priv, reg, offset); 270 271 /* UDF_n_A7 [31:24] 272 * UDF_n_A6 [23:8] 273 * UDF_n_A5 [7:0] 274 */ 275 reg = be16_to_cpu(v4_spec->pdst) >> 8; 276 if (mask) 277 offset = CORE_CFP_MASK_PORT(3); 278 else 279 offset = CORE_CFP_DATA_PORT(3); 280 core_writel(priv, reg, offset); 281 282 /* UDF_n_A5 [31:24] 283 * UDF_n_A4 [23:8] 284 * UDF_n_A3 [7:0] 285 */ 286 reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 | 287 (u32)be16_to_cpu(v4_spec->psrc) << 8 | 288 (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8; 289 if (mask) 290 offset = CORE_CFP_MASK_PORT(2); 291 else 292 offset = CORE_CFP_DATA_PORT(2); 293 core_writel(priv, reg, offset); 294 295 /* UDF_n_A3 [31:24] 296 * UDF_n_A2 [23:8] 297 * UDF_n_A1 [7:0] 298 */ 299 reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 | 300 (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 | 301 (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8; 302 if (mask) 303 offset = CORE_CFP_MASK_PORT(1); 304 else 305 offset = CORE_CFP_DATA_PORT(1); 306 core_writel(priv, reg, offset); 307 308 /* UDF_n_A1 [31:24] 309 * UDF_n_A0 [23:8] 310 * Reserved [7:4] 311 * Slice ID [3:2] 312 * Slice valid [1:0] 313 */ 314 reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 | 315 (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 | 316 SLICE_NUM(slice_num) | SLICE_VALID; 317 if (mask) 318 offset = CORE_CFP_MASK_PORT(0); 319 else 320 offset = CORE_CFP_DATA_PORT(0); 321 core_writel(priv, reg, offset); 322 } 323 324 static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, 325 unsigned int port_num, 326 unsigned int queue_num, 327 struct ethtool_rx_flow_spec *fs) 328 { 329 struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec; 330 const struct cfp_udf_layout *layout; 331 unsigned int slice_num, rule_index; 332 u8 ip_proto, ip_frag; 333 u8 num_udf; 334 u32 reg; 335 int ret; 336 337 switch (fs->flow_type & ~FLOW_EXT) { 338 case TCP_V4_FLOW: 339 ip_proto = IPPROTO_TCP; 340 v4_spec = &fs->h_u.tcp_ip4_spec; 341 v4_m_spec = &fs->m_u.tcp_ip4_spec; 342 break; 343 case UDP_V4_FLOW: 344 ip_proto = IPPROTO_UDP; 345 v4_spec = &fs->h_u.udp_ip4_spec; 346 v4_m_spec = &fs->m_u.udp_ip4_spec; 347 break; 348 default: 349 return -EINVAL; 350 } 351 352 ip_frag = be32_to_cpu(fs->m_ext.data[0]); 353 354 /* Locate the first rule available */ 355 if (fs->location == RX_CLS_LOC_ANY) 356 rule_index = find_first_zero_bit(priv->cfp.used, 357 bcm_sf2_cfp_rule_size(priv)); 358 else 359 rule_index = fs->location; 360 361 layout = &udf_tcpip4_layout; 362 /* We only use one UDF slice for now */ 363 slice_num = bcm_sf2_get_slice_number(layout, 0); 364 if (slice_num == UDF_NUM_SLICES) 365 return -EINVAL; 366 367 num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices); 368 369 /* Apply the UDF layout for this filter */ 370 bcm_sf2_cfp_udf_set(priv, layout, slice_num); 371 372 /* Apply to all packets received through this port */ 373 core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7)); 374 375 /* Source port map match */ 376 core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7)); 377 378 /* S-Tag status [31:30] 379 * C-Tag status [29:28] 380 * L2 framing [27:26] 381 * L3 framing [25:24] 382 * IP ToS [23:16] 383 * IP proto [15:08] 384 * IP Fragm [7] 385 * Non 1st frag [6] 386 * IP Authen [5] 387 * TTL range [4:3] 388 * PPPoE session [2] 389 * Reserved [1] 390 * UDF_Valid[8] [0] 391 */ 392 core_writel(priv, v4_spec->tos << IPTOS_SHIFT | 393 ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT | 394 udf_upper_bits(num_udf), 395 CORE_CFP_DATA_PORT(6)); 396 397 /* Mask with the specific layout for IPv4 packets */ 398 core_writel(priv, layout->udfs[slice_num].mask_value | 399 udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6)); 400 401 /* UDF_Valid[7:0] [31:24] 402 * S-Tag [23:8] 403 * C-Tag [7:0] 404 */ 405 core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5)); 406 407 /* Mask all but valid UDFs */ 408 core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5)); 409 410 /* Program the match and the mask */ 411 bcm_sf2_cfp_slice_ipv4(priv, v4_spec, slice_num, false); 412 bcm_sf2_cfp_slice_ipv4(priv, v4_m_spec, SLICE_NUM_MASK, true); 413 414 /* Insert into TCAM now */ 415 bcm_sf2_cfp_rule_addr_set(priv, rule_index); 416 417 ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL); 418 if (ret) { 419 pr_err("TCAM entry at addr %d failed\n", rule_index); 420 return ret; 421 } 422 423 /* Insert into Action and policer RAMs now */ 424 ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port_num, 425 queue_num, true); 426 if (ret) 427 return ret; 428 429 /* Turn on CFP for this rule now */ 430 reg = core_readl(priv, CORE_CFP_CTL_REG); 431 reg |= BIT(port); 432 core_writel(priv, reg, CORE_CFP_CTL_REG); 433 434 /* Flag the rule as being used and return it */ 435 set_bit(rule_index, priv->cfp.used); 436 set_bit(rule_index, priv->cfp.unique); 437 fs->location = rule_index; 438 439 return 0; 440 } 441 442 static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv, 443 const __be32 *ip6_addr, const __be16 port, 444 unsigned int slice_num, 445 bool mask) 446 { 447 u32 reg, tmp, val, offset; 448 449 /* C-Tag [31:24] 450 * UDF_n_B8 [23:8] (port) 451 * UDF_n_B7 (upper) [7:0] (addr[15:8]) 452 */ 453 reg = be32_to_cpu(ip6_addr[3]); 454 val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff); 455 if (mask) 456 offset = CORE_CFP_MASK_PORT(4); 457 else 458 offset = CORE_CFP_DATA_PORT(4); 459 core_writel(priv, val, offset); 460 461 /* UDF_n_B7 (lower) [31:24] (addr[7:0]) 462 * UDF_n_B6 [23:8] (addr[31:16]) 463 * UDF_n_B5 (upper) [7:0] (addr[47:40]) 464 */ 465 tmp = be32_to_cpu(ip6_addr[2]); 466 val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 | 467 ((tmp >> 8) & 0xff); 468 if (mask) 469 offset = CORE_CFP_MASK_PORT(3); 470 else 471 offset = CORE_CFP_DATA_PORT(3); 472 core_writel(priv, val, offset); 473 474 /* UDF_n_B5 (lower) [31:24] (addr[39:32]) 475 * UDF_n_B4 [23:8] (addr[63:48]) 476 * UDF_n_B3 (upper) [7:0] (addr[79:72]) 477 */ 478 reg = be32_to_cpu(ip6_addr[1]); 479 val = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 | 480 ((reg >> 8) & 0xff); 481 if (mask) 482 offset = CORE_CFP_MASK_PORT(2); 483 else 484 offset = CORE_CFP_DATA_PORT(2); 485 core_writel(priv, val, offset); 486 487 /* UDF_n_B3 (lower) [31:24] (addr[71:64]) 488 * UDF_n_B2 [23:8] (addr[95:80]) 489 * UDF_n_B1 (upper) [7:0] (addr[111:104]) 490 */ 491 tmp = be32_to_cpu(ip6_addr[0]); 492 val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 | 493 ((tmp >> 8) & 0xff); 494 if (mask) 495 offset = CORE_CFP_MASK_PORT(1); 496 else 497 offset = CORE_CFP_DATA_PORT(1); 498 core_writel(priv, val, offset); 499 500 /* UDF_n_B1 (lower) [31:24] (addr[103:96]) 501 * UDF_n_B0 [23:8] (addr[127:112]) 502 * Reserved [7:4] 503 * Slice ID [3:2] 504 * Slice valid [1:0] 505 */ 506 reg = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 | 507 SLICE_NUM(slice_num) | SLICE_VALID; 508 if (mask) 509 offset = CORE_CFP_MASK_PORT(0); 510 else 511 offset = CORE_CFP_DATA_PORT(0); 512 core_writel(priv, reg, offset); 513 } 514 515 static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, 516 unsigned int port_num, 517 unsigned int queue_num, 518 struct ethtool_rx_flow_spec *fs) 519 { 520 struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec; 521 unsigned int slice_num, rule_index[2]; 522 const struct cfp_udf_layout *layout; 523 u8 ip_proto, ip_frag; 524 int ret = 0; 525 u8 num_udf; 526 u32 reg; 527 528 switch (fs->flow_type & ~FLOW_EXT) { 529 case TCP_V6_FLOW: 530 ip_proto = IPPROTO_TCP; 531 v6_spec = &fs->h_u.tcp_ip6_spec; 532 v6_m_spec = &fs->m_u.tcp_ip6_spec; 533 break; 534 case UDP_V6_FLOW: 535 ip_proto = IPPROTO_UDP; 536 v6_spec = &fs->h_u.udp_ip6_spec; 537 v6_m_spec = &fs->m_u.udp_ip6_spec; 538 break; 539 default: 540 return -EINVAL; 541 } 542 543 ip_frag = be32_to_cpu(fs->m_ext.data[0]); 544 545 layout = &udf_tcpip6_layout; 546 slice_num = bcm_sf2_get_slice_number(layout, 0); 547 if (slice_num == UDF_NUM_SLICES) 548 return -EINVAL; 549 550 num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices); 551 552 /* Negotiate two indexes, one for the second half which we are chained 553 * from, which is what we will return to user-space, and a second one 554 * which is used to store its first half. That first half does not 555 * allow any choice of placement, so it just needs to find the next 556 * available bit. We return the second half as fs->location because 557 * that helps with the rule lookup later on since the second half is 558 * chained from its first half, we can easily identify IPv6 CFP rules 559 * by looking whether they carry a CHAIN_ID. 560 * 561 * We also want the second half to have a lower rule_index than its 562 * first half because the HW search is by incrementing addresses. 563 */ 564 if (fs->location == RX_CLS_LOC_ANY) 565 rule_index[0] = find_first_zero_bit(priv->cfp.used, 566 bcm_sf2_cfp_rule_size(priv)); 567 else 568 rule_index[0] = fs->location; 569 570 /* Flag it as used (cleared on error path) such that we can immediately 571 * obtain a second one to chain from. 572 */ 573 set_bit(rule_index[0], priv->cfp.used); 574 575 rule_index[1] = find_first_zero_bit(priv->cfp.used, 576 bcm_sf2_cfp_rule_size(priv)); 577 if (rule_index[1] > bcm_sf2_cfp_rule_size(priv)) { 578 ret = -ENOSPC; 579 goto out_err; 580 } 581 582 /* Apply the UDF layout for this filter */ 583 bcm_sf2_cfp_udf_set(priv, layout, slice_num); 584 585 /* Apply to all packets received through this port */ 586 core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7)); 587 588 /* Source port map match */ 589 core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7)); 590 591 /* S-Tag status [31:30] 592 * C-Tag status [29:28] 593 * L2 framing [27:26] 594 * L3 framing [25:24] 595 * IP ToS [23:16] 596 * IP proto [15:08] 597 * IP Fragm [7] 598 * Non 1st frag [6] 599 * IP Authen [5] 600 * TTL range [4:3] 601 * PPPoE session [2] 602 * Reserved [1] 603 * UDF_Valid[8] [0] 604 */ 605 reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT | 606 ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf); 607 core_writel(priv, reg, CORE_CFP_DATA_PORT(6)); 608 609 /* Mask with the specific layout for IPv6 packets including 610 * UDF_Valid[8] 611 */ 612 reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf); 613 core_writel(priv, reg, CORE_CFP_MASK_PORT(6)); 614 615 /* UDF_Valid[7:0] [31:24] 616 * S-Tag [23:8] 617 * C-Tag [7:0] 618 */ 619 core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5)); 620 621 /* Mask all but valid UDFs */ 622 core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5)); 623 624 /* Slice the IPv6 source address and port */ 625 bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc, 626 slice_num, false); 627 bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc, 628 SLICE_NUM_MASK, true); 629 630 /* Insert into TCAM now because we need to insert a second rule */ 631 bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]); 632 633 ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL); 634 if (ret) { 635 pr_err("TCAM entry at addr %d failed\n", rule_index[0]); 636 goto out_err; 637 } 638 639 /* Insert into Action and policer RAMs now */ 640 ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num, 641 queue_num, false); 642 if (ret) 643 goto out_err; 644 645 /* Now deal with the second slice to chain this rule */ 646 slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1); 647 if (slice_num == UDF_NUM_SLICES) { 648 ret = -EINVAL; 649 goto out_err; 650 } 651 652 num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices); 653 654 /* Apply the UDF layout for this filter */ 655 bcm_sf2_cfp_udf_set(priv, layout, slice_num); 656 657 /* Chained rule, source port match is coming from the rule we are 658 * chained from. 659 */ 660 core_writel(priv, 0, CORE_CFP_DATA_PORT(7)); 661 core_writel(priv, 0, CORE_CFP_MASK_PORT(7)); 662 663 /* 664 * CHAIN ID [31:24] chain to previous slice 665 * Reserved [23:20] 666 * UDF_Valid[11:8] [19:16] 667 * UDF_Valid[7:0] [15:8] 668 * UDF_n_D11 [7:0] 669 */ 670 reg = rule_index[0] << 24 | udf_upper_bits(num_udf) << 16 | 671 udf_lower_bits(num_udf) << 8; 672 core_writel(priv, reg, CORE_CFP_DATA_PORT(6)); 673 674 /* Mask all except chain ID, UDF Valid[8] and UDF Valid[7:0] */ 675 reg = XCESS_ADDR_MASK << 24 | udf_upper_bits(num_udf) << 16 | 676 udf_lower_bits(num_udf) << 8; 677 core_writel(priv, reg, CORE_CFP_MASK_PORT(6)); 678 679 /* Don't care */ 680 core_writel(priv, 0, CORE_CFP_DATA_PORT(5)); 681 682 /* Mask all */ 683 core_writel(priv, 0, CORE_CFP_MASK_PORT(5)); 684 685 bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6dst, v6_spec->pdst, slice_num, 686 false); 687 bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6dst, v6_m_spec->pdst, 688 SLICE_NUM_MASK, true); 689 690 /* Insert into TCAM now */ 691 bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]); 692 693 ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL); 694 if (ret) { 695 pr_err("TCAM entry at addr %d failed\n", rule_index[1]); 696 goto out_err; 697 } 698 699 /* Insert into Action and policer RAMs now, set chain ID to 700 * the one we are chained to 701 */ 702 ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num, 703 queue_num, true); 704 if (ret) 705 goto out_err; 706 707 /* Turn on CFP for this rule now */ 708 reg = core_readl(priv, CORE_CFP_CTL_REG); 709 reg |= BIT(port); 710 core_writel(priv, reg, CORE_CFP_CTL_REG); 711 712 /* Flag the second half rule as being used now, return it as the 713 * location, and flag it as unique while dumping rules 714 */ 715 set_bit(rule_index[1], priv->cfp.used); 716 set_bit(rule_index[1], priv->cfp.unique); 717 fs->location = rule_index[1]; 718 719 return ret; 720 721 out_err: 722 clear_bit(rule_index[0], priv->cfp.used); 723 return ret; 724 } 725 726 static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, 727 struct ethtool_rx_flow_spec *fs) 728 { 729 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 730 unsigned int queue_num, port_num; 731 int ret = -EINVAL; 732 733 /* Check for unsupported extensions */ 734 if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype || 735 fs->m_ext.data[1])) 736 return -EINVAL; 737 738 if (fs->location != RX_CLS_LOC_ANY && 739 test_bit(fs->location, priv->cfp.used)) 740 return -EBUSY; 741 742 if (fs->location != RX_CLS_LOC_ANY && 743 fs->location > bcm_sf2_cfp_rule_size(priv)) 744 return -EINVAL; 745 746 /* We do not support discarding packets, check that the 747 * destination port is enabled and that we are within the 748 * number of ports supported by the switch 749 */ 750 port_num = fs->ring_cookie / SF2_NUM_EGRESS_QUEUES; 751 752 if (fs->ring_cookie == RX_CLS_FLOW_DISC || 753 !dsa_is_user_port(ds, port_num) || 754 port_num >= priv->hw_params.num_ports) 755 return -EINVAL; 756 /* 757 * We have a small oddity where Port 6 just does not have a 758 * valid bit here (so we substract by one). 759 */ 760 queue_num = fs->ring_cookie % SF2_NUM_EGRESS_QUEUES; 761 if (port_num >= 7) 762 port_num -= 1; 763 764 switch (fs->flow_type & ~FLOW_EXT) { 765 case TCP_V4_FLOW: 766 case UDP_V4_FLOW: 767 ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num, 768 queue_num, fs); 769 break; 770 case TCP_V6_FLOW: 771 case UDP_V6_FLOW: 772 ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num, 773 queue_num, fs); 774 break; 775 default: 776 break; 777 } 778 779 return ret; 780 } 781 782 static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port, 783 u32 loc, u32 *next_loc) 784 { 785 int ret; 786 u32 reg; 787 788 /* Refuse deletion of unused rules, and the default reserved rule */ 789 if (!test_bit(loc, priv->cfp.used) || loc == 0) 790 return -EINVAL; 791 792 /* Indicate which rule we want to read */ 793 bcm_sf2_cfp_rule_addr_set(priv, loc); 794 795 ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL); 796 if (ret) 797 return ret; 798 799 /* Check if this is possibly an IPv6 rule that would 800 * indicate we need to delete its companion rule 801 * as well 802 */ 803 reg = core_readl(priv, CORE_CFP_DATA_PORT(6)); 804 if (next_loc) 805 *next_loc = (reg >> 24) & CHAIN_ID_MASK; 806 807 /* Clear its valid bits */ 808 reg = core_readl(priv, CORE_CFP_DATA_PORT(0)); 809 reg &= ~SLICE_VALID; 810 core_writel(priv, reg, CORE_CFP_DATA_PORT(0)); 811 812 /* Write back this entry into the TCAM now */ 813 ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL); 814 if (ret) 815 return ret; 816 817 clear_bit(loc, priv->cfp.used); 818 clear_bit(loc, priv->cfp.unique); 819 820 return 0; 821 } 822 823 static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, 824 u32 loc) 825 { 826 u32 next_loc = 0; 827 int ret; 828 829 ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc); 830 if (ret) 831 return ret; 832 833 /* If this was an IPv6 rule, delete is companion rule too */ 834 if (next_loc) 835 ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL); 836 837 return ret; 838 } 839 840 static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow) 841 { 842 unsigned int i; 843 844 for (i = 0; i < sizeof(flow->m_u); i++) 845 flow->m_u.hdata[i] ^= 0xff; 846 847 flow->m_ext.vlan_etype ^= cpu_to_be16(~0); 848 flow->m_ext.vlan_tci ^= cpu_to_be16(~0); 849 flow->m_ext.data[0] ^= cpu_to_be32(~0); 850 flow->m_ext.data[1] ^= cpu_to_be32(~0); 851 } 852 853 static int bcm_sf2_cfp_unslice_ipv4(struct bcm_sf2_priv *priv, 854 struct ethtool_tcpip4_spec *v4_spec, 855 bool mask) 856 { 857 u32 reg, offset, ipv4; 858 u16 src_dst_port; 859 860 if (mask) 861 offset = CORE_CFP_MASK_PORT(3); 862 else 863 offset = CORE_CFP_DATA_PORT(3); 864 865 reg = core_readl(priv, offset); 866 /* src port [15:8] */ 867 src_dst_port = reg << 8; 868 869 if (mask) 870 offset = CORE_CFP_MASK_PORT(2); 871 else 872 offset = CORE_CFP_DATA_PORT(2); 873 874 reg = core_readl(priv, offset); 875 /* src port [7:0] */ 876 src_dst_port |= (reg >> 24); 877 878 v4_spec->pdst = cpu_to_be16(src_dst_port); 879 v4_spec->psrc = cpu_to_be16((u16)(reg >> 8)); 880 881 /* IPv4 dst [15:8] */ 882 ipv4 = (reg & 0xff) << 8; 883 884 if (mask) 885 offset = CORE_CFP_MASK_PORT(1); 886 else 887 offset = CORE_CFP_DATA_PORT(1); 888 889 reg = core_readl(priv, offset); 890 /* IPv4 dst [31:16] */ 891 ipv4 |= ((reg >> 8) & 0xffff) << 16; 892 /* IPv4 dst [7:0] */ 893 ipv4 |= (reg >> 24) & 0xff; 894 v4_spec->ip4dst = cpu_to_be32(ipv4); 895 896 /* IPv4 src [15:8] */ 897 ipv4 = (reg & 0xff) << 8; 898 899 if (mask) 900 offset = CORE_CFP_MASK_PORT(0); 901 else 902 offset = CORE_CFP_DATA_PORT(0); 903 reg = core_readl(priv, offset); 904 905 /* Once the TCAM is programmed, the mask reflects the slice number 906 * being matched, don't bother checking it when reading back the 907 * mask spec 908 */ 909 if (!mask && !(reg & SLICE_VALID)) 910 return -EINVAL; 911 912 /* IPv4 src [7:0] */ 913 ipv4 |= (reg >> 24) & 0xff; 914 /* IPv4 src [31:16] */ 915 ipv4 |= ((reg >> 8) & 0xffff) << 16; 916 v4_spec->ip4src = cpu_to_be32(ipv4); 917 918 return 0; 919 } 920 921 static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port, 922 struct ethtool_rx_flow_spec *fs) 923 { 924 struct ethtool_tcpip4_spec *v4_spec = NULL, *v4_m_spec = NULL; 925 u32 reg; 926 int ret; 927 928 reg = core_readl(priv, CORE_CFP_DATA_PORT(6)); 929 930 switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) { 931 case IPPROTO_TCP: 932 fs->flow_type = TCP_V4_FLOW; 933 v4_spec = &fs->h_u.tcp_ip4_spec; 934 v4_m_spec = &fs->m_u.tcp_ip4_spec; 935 break; 936 case IPPROTO_UDP: 937 fs->flow_type = UDP_V4_FLOW; 938 v4_spec = &fs->h_u.udp_ip4_spec; 939 v4_m_spec = &fs->m_u.udp_ip4_spec; 940 break; 941 default: 942 return -EINVAL; 943 } 944 945 fs->m_ext.data[0] = cpu_to_be32((reg >> IP_FRAG_SHIFT) & 1); 946 v4_spec->tos = (reg >> IPTOS_SHIFT) & IPTOS_MASK; 947 948 ret = bcm_sf2_cfp_unslice_ipv4(priv, v4_spec, false); 949 if (ret) 950 return ret; 951 952 return bcm_sf2_cfp_unslice_ipv4(priv, v4_m_spec, true); 953 } 954 955 static int bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv, 956 __be32 *ip6_addr, __be16 *port, 957 bool mask) 958 { 959 u32 reg, tmp, offset; 960 961 /* C-Tag [31:24] 962 * UDF_n_B8 [23:8] (port) 963 * UDF_n_B7 (upper) [7:0] (addr[15:8]) 964 */ 965 if (mask) 966 offset = CORE_CFP_MASK_PORT(4); 967 else 968 offset = CORE_CFP_DATA_PORT(4); 969 reg = core_readl(priv, offset); 970 *port = cpu_to_be32(reg) >> 8; 971 tmp = (u32)(reg & 0xff) << 8; 972 973 /* UDF_n_B7 (lower) [31:24] (addr[7:0]) 974 * UDF_n_B6 [23:8] (addr[31:16]) 975 * UDF_n_B5 (upper) [7:0] (addr[47:40]) 976 */ 977 if (mask) 978 offset = CORE_CFP_MASK_PORT(3); 979 else 980 offset = CORE_CFP_DATA_PORT(3); 981 reg = core_readl(priv, offset); 982 tmp |= (reg >> 24) & 0xff; 983 tmp |= (u32)((reg >> 8) << 16); 984 ip6_addr[3] = cpu_to_be32(tmp); 985 tmp = (u32)(reg & 0xff) << 8; 986 987 /* UDF_n_B5 (lower) [31:24] (addr[39:32]) 988 * UDF_n_B4 [23:8] (addr[63:48]) 989 * UDF_n_B3 (upper) [7:0] (addr[79:72]) 990 */ 991 if (mask) 992 offset = CORE_CFP_MASK_PORT(2); 993 else 994 offset = CORE_CFP_DATA_PORT(2); 995 reg = core_readl(priv, offset); 996 tmp |= (reg >> 24) & 0xff; 997 tmp |= (u32)((reg >> 8) << 16); 998 ip6_addr[2] = cpu_to_be32(tmp); 999 tmp = (u32)(reg & 0xff) << 8; 1000 1001 /* UDF_n_B3 (lower) [31:24] (addr[71:64]) 1002 * UDF_n_B2 [23:8] (addr[95:80]) 1003 * UDF_n_B1 (upper) [7:0] (addr[111:104]) 1004 */ 1005 if (mask) 1006 offset = CORE_CFP_MASK_PORT(1); 1007 else 1008 offset = CORE_CFP_DATA_PORT(1); 1009 reg = core_readl(priv, offset); 1010 tmp |= (reg >> 24) & 0xff; 1011 tmp |= (u32)((reg >> 8) << 16); 1012 ip6_addr[1] = cpu_to_be32(tmp); 1013 tmp = (u32)(reg & 0xff) << 8; 1014 1015 /* UDF_n_B1 (lower) [31:24] (addr[103:96]) 1016 * UDF_n_B0 [23:8] (addr[127:112]) 1017 * Reserved [7:4] 1018 * Slice ID [3:2] 1019 * Slice valid [1:0] 1020 */ 1021 if (mask) 1022 offset = CORE_CFP_MASK_PORT(0); 1023 else 1024 offset = CORE_CFP_DATA_PORT(0); 1025 reg = core_readl(priv, offset); 1026 tmp |= (reg >> 24) & 0xff; 1027 tmp |= (u32)((reg >> 8) << 16); 1028 ip6_addr[0] = cpu_to_be32(tmp); 1029 1030 if (!mask && !(reg & SLICE_VALID)) 1031 return -EINVAL; 1032 1033 return 0; 1034 } 1035 1036 static int bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv, int port, 1037 struct ethtool_rx_flow_spec *fs, 1038 u32 next_loc) 1039 { 1040 struct ethtool_tcpip6_spec *v6_spec = NULL, *v6_m_spec = NULL; 1041 u32 reg; 1042 int ret; 1043 1044 /* UDPv6 and TCPv6 both use ethtool_tcpip6_spec so we are fine 1045 * assuming tcp_ip6_spec here being an union. 1046 */ 1047 v6_spec = &fs->h_u.tcp_ip6_spec; 1048 v6_m_spec = &fs->m_u.tcp_ip6_spec; 1049 1050 /* Read the second half first */ 1051 ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6dst, &v6_spec->pdst, 1052 false); 1053 if (ret) 1054 return ret; 1055 1056 ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6dst, 1057 &v6_m_spec->pdst, true); 1058 if (ret) 1059 return ret; 1060 1061 /* Read last to avoid next entry clobbering the results during search 1062 * operations. We would not have the port enabled for this rule, so 1063 * don't bother checking it. 1064 */ 1065 (void)core_readl(priv, CORE_CFP_DATA_PORT(7)); 1066 1067 /* The slice number is valid, so read the rule we are chained from now 1068 * which is our first half. 1069 */ 1070 bcm_sf2_cfp_rule_addr_set(priv, next_loc); 1071 ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL); 1072 if (ret) 1073 return ret; 1074 1075 reg = core_readl(priv, CORE_CFP_DATA_PORT(6)); 1076 1077 switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) { 1078 case IPPROTO_TCP: 1079 fs->flow_type = TCP_V6_FLOW; 1080 break; 1081 case IPPROTO_UDP: 1082 fs->flow_type = UDP_V6_FLOW; 1083 break; 1084 default: 1085 return -EINVAL; 1086 } 1087 1088 ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6src, &v6_spec->psrc, 1089 false); 1090 if (ret) 1091 return ret; 1092 1093 return bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6src, 1094 &v6_m_spec->psrc, true); 1095 } 1096 1097 static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port, 1098 struct ethtool_rxnfc *nfc) 1099 { 1100 u32 reg, ipv4_or_chain_id; 1101 unsigned int queue_num; 1102 int ret; 1103 1104 bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location); 1105 1106 ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM); 1107 if (ret) 1108 return ret; 1109 1110 reg = core_readl(priv, CORE_ACT_POL_DATA0); 1111 1112 ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL); 1113 if (ret) 1114 return ret; 1115 1116 /* Extract the destination port */ 1117 nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) & 1118 DST_MAP_IB_MASK) - 1; 1119 1120 /* There is no Port 6, so we compensate for that here */ 1121 if (nfc->fs.ring_cookie >= 6) 1122 nfc->fs.ring_cookie++; 1123 nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES; 1124 1125 /* Extract the destination queue */ 1126 queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK; 1127 nfc->fs.ring_cookie += queue_num; 1128 1129 /* Extract the L3_FRAMING or CHAIN_ID */ 1130 reg = core_readl(priv, CORE_CFP_DATA_PORT(6)); 1131 1132 /* With IPv6 rules this would contain a non-zero chain ID since 1133 * we reserve entry 0 and it cannot be used. So if we read 0 here 1134 * this means an IPv4 rule. 1135 */ 1136 ipv4_or_chain_id = (reg >> L3_FRAMING_SHIFT) & 0xff; 1137 if (ipv4_or_chain_id == 0) 1138 ret = bcm_sf2_cfp_ipv4_rule_get(priv, port, &nfc->fs); 1139 else 1140 ret = bcm_sf2_cfp_ipv6_rule_get(priv, port, &nfc->fs, 1141 ipv4_or_chain_id); 1142 if (ret) 1143 return ret; 1144 1145 /* Read last to avoid next entry clobbering the results during search 1146 * operations 1147 */ 1148 reg = core_readl(priv, CORE_CFP_DATA_PORT(7)); 1149 if (!(reg & 1 << port)) 1150 return -EINVAL; 1151 1152 bcm_sf2_invert_masks(&nfc->fs); 1153 1154 /* Put the TCAM size here */ 1155 nfc->data = bcm_sf2_cfp_rule_size(priv); 1156 1157 return 0; 1158 } 1159 1160 /* We implement the search doing a TCAM search operation */ 1161 static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv, 1162 int port, struct ethtool_rxnfc *nfc, 1163 u32 *rule_locs) 1164 { 1165 unsigned int index = 1, rules_cnt = 0; 1166 1167 for_each_set_bit_from(index, priv->cfp.unique, priv->num_cfp_rules) { 1168 rule_locs[rules_cnt] = index; 1169 rules_cnt++; 1170 } 1171 1172 /* Put the TCAM size here */ 1173 nfc->data = bcm_sf2_cfp_rule_size(priv); 1174 nfc->rule_cnt = rules_cnt; 1175 1176 return 0; 1177 } 1178 1179 int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, 1180 struct ethtool_rxnfc *nfc, u32 *rule_locs) 1181 { 1182 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 1183 int ret = 0; 1184 1185 mutex_lock(&priv->cfp.lock); 1186 1187 switch (nfc->cmd) { 1188 case ETHTOOL_GRXCLSRLCNT: 1189 /* Subtract the default, unusable rule */ 1190 nfc->rule_cnt = bitmap_weight(priv->cfp.unique, 1191 priv->num_cfp_rules) - 1; 1192 /* We support specifying rule locations */ 1193 nfc->data |= RX_CLS_LOC_SPECIAL; 1194 break; 1195 case ETHTOOL_GRXCLSRULE: 1196 ret = bcm_sf2_cfp_rule_get(priv, port, nfc); 1197 break; 1198 case ETHTOOL_GRXCLSRLALL: 1199 ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs); 1200 break; 1201 default: 1202 ret = -EOPNOTSUPP; 1203 break; 1204 } 1205 1206 mutex_unlock(&priv->cfp.lock); 1207 1208 return ret; 1209 } 1210 1211 int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port, 1212 struct ethtool_rxnfc *nfc) 1213 { 1214 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 1215 int ret = 0; 1216 1217 mutex_lock(&priv->cfp.lock); 1218 1219 switch (nfc->cmd) { 1220 case ETHTOOL_SRXCLSRLINS: 1221 ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs); 1222 break; 1223 1224 case ETHTOOL_SRXCLSRLDEL: 1225 ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location); 1226 break; 1227 default: 1228 ret = -EOPNOTSUPP; 1229 break; 1230 } 1231 1232 mutex_unlock(&priv->cfp.lock); 1233 1234 return ret; 1235 } 1236 1237 int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv) 1238 { 1239 unsigned int timeout = 1000; 1240 u32 reg; 1241 1242 reg = core_readl(priv, CORE_CFP_ACC); 1243 reg |= TCAM_RESET; 1244 core_writel(priv, reg, CORE_CFP_ACC); 1245 1246 do { 1247 reg = core_readl(priv, CORE_CFP_ACC); 1248 if (!(reg & TCAM_RESET)) 1249 break; 1250 1251 cpu_relax(); 1252 } while (timeout--); 1253 1254 if (!timeout) 1255 return -ETIMEDOUT; 1256 1257 return 0; 1258 } 1259