// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 */

#include <linux/list.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>
#include <net/flow_offload.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"

struct cfp_rule {
	int port;
	struct ethtool_rx_flow_spec fs;
	struct list_head next;
};

struct cfp_udf_slice_layout {
	u8 slices[UDFS_PER_SLICE];
	u32 mask_value;
	u32 base_offset;
};

struct cfp_udf_layout {
	struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];
};

static const u8 zero_slice[UDFS_PER_SLICE] = { };

/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
	.udfs = {
		[1] = {
			.slices = {
				/* End of L2, byte offset 12, src IP[0:15] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[16:31] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, dst IP[0:15] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, dst IP[16:31] */
				CFG_UDF_EOL2 | 9,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
				0, 0, 0
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
		},
	},
};

/* UDF slices layout for a TCPv6/UDPv6 specification */
static const struct cfp_udf_layout udf_tcpip6_layout = {
	.udfs = {
		[0] = {
			.slices = {
				/* End of L2, byte offset 8, src IP[0:15] */
				CFG_UDF_EOL2 | 4,
				/* End of L2, byte offset 10, src IP[16:31] */
				CFG_UDF_EOL2 | 5,
				/* End of L2, byte offset 12, src IP[32:47] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[48:63] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, src IP[64:79] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, src IP[80:95] */
				CFG_UDF_EOL2 | 9,
				/* End of L2, byte offset 20, src IP[96:111] */
				CFG_UDF_EOL2 | 10,
				/* End of L2, byte offset 22, src IP[112:127] */
				CFG_UDF_EOL2 | 11,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_B_0_8_PORT_0,
		},
		[3] = {
			.slices = {
				/* End of L2, byte offset 24, dst IP[0:15] */
				CFG_UDF_EOL2 | 12,
				/* End of L2, byte offset 26, dst IP[16:31] */
				CFG_UDF_EOL2 | 13,
				/* End of L2, byte offset 28, dst IP[32:47] */
				CFG_UDF_EOL2 | 14,
				/* End of L2, byte offset 30, dst IP[48:63] */
				CFG_UDF_EOL2 | 15,
				/* End of L2, byte offset 32, dst IP[64:79] */
				CFG_UDF_EOL2 | 16,
				/* End of L2, byte offset 34, dst IP[80:95] */
				CFG_UDF_EOL2 | 17,
				/* End of L2, byte offset 36, dst IP[96:111] */
				CFG_UDF_EOL2 | 18,
				/* End of L2, byte offset 38, dst IP[112:127] */
				CFG_UDF_EOL2 | 19,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_D_0_11_PORT_0,
		},
	},
};

static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
	unsigned int i, count = 0;

	for (i = 0; i < UDFS_PER_SLICE; i++) {
		if (layout[i] != 0)
			count++;
	}

	return count;
}
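
/* Each slice exposes nine UDF valid bits. Per the key layout comments
 * further down, UDF_Valid[7:0] are programmed at bits [31:24] of
 * CORE_CFP_DATA_PORT(5) while UDF_Valid[8] sits at bit 0 of
 * CORE_CFP_DATA_PORT(6); the two helpers below split a num_udf-wide
 * mask accordingly.
 */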
static inline u32 udf_upper_bits(unsigned int num_udf)
{
	return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
}

static inline u32 udf_lower_bits(unsigned int num_udf)
{
	return (u8)GENMASK(num_udf - 1, 0);
}

static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
					     unsigned int start)
{
	const struct cfp_udf_slice_layout *slice_layout;
	unsigned int slice_idx;

	for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
		slice_layout = &l->udfs[slice_idx];
		if (memcmp(slice_layout->slices, zero_slice,
			   sizeof(zero_slice)))
			break;
	}

	return slice_idx;
}

static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
				const struct cfp_udf_layout *layout,
				unsigned int slice_num)
{
	u32 offset = layout->udfs[slice_num].base_offset;
	unsigned int i;

	for (i = 0; i < UDFS_PER_SLICE; i++)
		core_writel(priv, layout->udfs[slice_num].slices[i],
			    offset + i * 4);
}

static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
	reg |= OP_STR_DONE | op;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & OP_STR_DONE))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
					     unsigned int addr)
{
	u32 reg;

	WARN_ON(addr >= priv->num_cfp_rules);

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
	reg |= addr << XCESS_ADDR_SHIFT;
	core_writel(priv, reg, CORE_CFP_ACC);
}

static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
	/* Entry #0 is reserved */
	return priv->num_cfp_rules - 1;
}
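
/* Program the action and policer RAMs for @rule_index. A minimal summary,
 * inferred from the register writes below: ACT_POL_DATA0 carries the
 * destination map and traffic class override, ACT_POL_DATA1 the
 * classification (chain) ID placed in the Broadcom tag, and the rate
 * meter is left disabled so matched traffic is not policed.
 */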
static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
				   unsigned int rule_index,
				   int src_port,
				   unsigned int port_num,
				   unsigned int queue_num,
				   bool fwd_map_change)
{
	int ret;
	u32 reg;

	/* Replace ARL derived destination with DST_MAP derived, define
	 * which port and queue this should be forwarded to.
	 */
	if (fwd_map_change)
		reg = CHANGE_FWRD_MAP_IB_REP_ARL |
		      BIT(port_num + DST_MAP_IB_SHIFT) |
		      CHANGE_TC | queue_num << NEW_TC_SHIFT;
	else
		reg = 0;

	/* Enable looping back to the original port */
	if (src_port == port_num)
		reg |= LOOP_BK_EN;

	core_writel(priv, reg, CORE_ACT_POL_DATA0);

	/* Set classification ID that needs to be put in Broadcom tag */
	core_writel(priv, rule_index << CHAIN_ID_SHIFT, CORE_ACT_POL_DATA1);

	core_writel(priv, 0, CORE_ACT_POL_DATA2);

	/* Configure policer RAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
	if (ret) {
		pr_err("Policer entry at %d failed\n", rule_index);
		return ret;
	}

	/* Disable the policer */
	core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

	/* Now the rate meter */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
	if (ret) {
		pr_err("Meter entry at %d failed\n", rule_index);
		return ret;
	}

	return 0;
}

static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
				   struct flow_dissector_key_ipv4_addrs *addrs,
				   struct flow_dissector_key_ports *ports,
				   unsigned int slice_num,
				   bool mask)
{
	u32 reg, offset;

	/* C-Tag		[31:24]
	 * UDF_n_A8		[23:8]
	 * UDF_n_A7		[7:0]
	 */
	reg = 0;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, reg, offset);

	/* UDF_n_A7		[31:24]
	 * UDF_n_A6		[23:8]
	 * UDF_n_A5		[7:0]
	 */
	reg = be16_to_cpu(ports->dst) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, reg, offset);

	/* UDF_n_A5		[31:24]
	 * UDF_n_A4		[23:8]
	 * UDF_n_A3		[7:0]
	 */
	reg = (be16_to_cpu(ports->dst) & 0xff) << 24 |
	      (u32)be16_to_cpu(ports->src) << 8 |
	      (be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, reg, offset);

	/* UDF_n_A3		[31:24]
	 * UDF_n_A2		[23:8]
	 * UDF_n_A1		[7:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->dst) >> 16) << 8 |
	      (be32_to_cpu(addrs->src) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, reg, offset);

	/* UDF_n_A1		[31:24]
	 * UDF_n_A0		[23:8]
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->src) >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}
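
/* Build and install a single-slice IPv4 rule: the ethtool flow spec is
 * first converted to a generic flow rule, the UDF layout is programmed,
 * and the key/mask halves are written to the TCAM before the action and
 * policer RAMs are set up for the destination port and queue.
 */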
static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_rx_flow_spec_input input = {};
	const struct cfp_udf_layout *layout;
	unsigned int slice_num, rule_index;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv4_addrs ipv4;
	struct flow_match_ports ports;
	struct flow_match_ip ip;
	u8 ip_proto, ip_frag;
	u8 num_udf;
	u32 reg;
	int ret;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V4_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);

	/* Locate the first rule available */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index = find_first_zero_bit(priv->cfp.used,
						 priv->num_cfp_rules);
	else
		rule_index = fs->location;

	if (rule_index > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
	flow_rule_match_ports(flow->rule, &ports);
	flow_rule_match_ip(flow->rule, &ip);

	layout = &udf_tcpip4_layout;
	/* We only use one UDF slice for now */
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	core_writel(priv, ip.key->tos << IPTOS_SHIFT |
		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
		    udf_upper_bits(num_udf),
		    CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv4 packets */
	core_writel(priv, layout->udfs[slice_num].mask_value |
		    udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	/* Mask all but valid UDFs */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

	/* Program the match and the mask */
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, slice_num, false);
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, SLICE_NUM_MASK, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the rule as being used and return it */
	set_bit(rule_index, priv->cfp.used);
	set_bit(rule_index, priv->cfp.unique);
	fs->location = rule_index;

	return 0;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
	return ret;
}
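
/* Pack one 128-bit address plus a 16-bit L4 port into the five 32-bit
 * key/mask words of a slice. The same helper serves both halves of an
 * IPv6 rule: source address/port for the first slice, destination
 * address/port for the chained one.
 */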
static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
				   const __be32 *ip6_addr, const __be16 port,
				   unsigned int slice_num,
				   bool mask)
{
	u32 reg, tmp, val, offset;

	/* C-Tag		[31:24]
	 * UDF_n_B8		[23:8]	(port)
	 * UDF_n_B7 (upper)	[7:0]	(addr[15:8])
	 */
	reg = be32_to_cpu(ip6_addr[3]);
	val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, val, offset);

	/* UDF_n_B7 (lower)	[31:24]	(addr[7:0])
	 * UDF_n_B6		[23:8]	(addr[31:16])
	 * UDF_n_B5 (upper)	[7:0]	(addr[47:40])
	 */
	tmp = be32_to_cpu(ip6_addr[2]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, val, offset);

	/* UDF_n_B5 (lower)	[31:24]	(addr[39:32])
	 * UDF_n_B4		[23:8]	(addr[63:48])
	 * UDF_n_B3 (upper)	[7:0]	(addr[79:72])
	 */
	reg = be32_to_cpu(ip6_addr[1]);
	val = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, val, offset);

	/* UDF_n_B3 (lower)	[31:24]	(addr[71:64])
	 * UDF_n_B2		[23:8]	(addr[95:80])
	 * UDF_n_B1 (upper)	[7:0]	(addr[111:104])
	 */
	tmp = be32_to_cpu(ip6_addr[0]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, val, offset);

	/* UDF_n_B1 (lower)	[31:24]	(addr[103:96])
	 * UDF_n_B0		[23:8]	(addr[127:112])
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}

static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
					      int port, u32 location)
{
	struct cfp_rule *rule;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		if (rule->port == port && rule->fs.location == location)
			return rule;
	}

	/* Return NULL on a miss: falling out of list_for_each_entry()
	 * leaves the cursor pointing at the list head container, which
	 * would defeat the callers' NULL checks.
	 */
	return NULL;
}

static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct cfp_rule *rule = NULL;
	size_t fs_size = 0;
	int ret = 1;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = 1;
		if (rule->port != port)
			continue;

		if (rule->fs.flow_type != fs->flow_type ||
		    rule->fs.ring_cookie != fs->ring_cookie ||
		    rule->fs.h_ext.data[0] != fs->h_ext.data[0])
			continue;

		switch (fs->flow_type & ~FLOW_EXT) {
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
			fs_size = sizeof(struct ethtool_tcpip6_spec);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
			fs_size = sizeof(struct ethtool_tcpip4_spec);
			break;
		default:
			continue;
		}

		ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
		ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
		if (ret == 0)
			break;
	}

	return ret;
}
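
/* Note that bcm_sf2_cfp_rule_cmp() above follows memcmp() semantics: it
 * returns 0 only when an identical rule already exists for this port,
 * which the insertion path turns into -EEXIST.
 */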
static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_rx_flow_spec_input input = {};
	unsigned int slice_num, rule_index[2];
	const struct cfp_udf_layout *layout;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv6_addrs ipv6;
	struct flow_match_ports ports;
	u8 ip_proto, ip_frag;
	int ret = 0;
	u8 num_udf;
	u32 reg;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V6_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V6_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);

	layout = &udf_tcpip6_layout;
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Negotiate two indexes, one for the second half which we are chained
	 * from, which is what we will return to user-space, and a second one
	 * which is used to store its first half. That first half does not
	 * allow any choice of placement, so it just needs to find the next
	 * available bit. We return the second half as fs->location because
	 * that helps with the rule lookup later on since the second half is
	 * chained from its first half, we can easily identify IPv6 CFP rules
	 * by looking whether they carry a CHAIN_ID.
	 *
	 * We also want the second half to have a lower rule_index than its
	 * first half because the HW search is by incrementing addresses.
	 */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index[1] = find_first_zero_bit(priv->cfp.used,
						    priv->num_cfp_rules);
	else
		rule_index[1] = fs->location;
	if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	/* Flag it as used (cleared on error path) such that we can immediately
	 * obtain a second one to chain from.
	 */
	set_bit(rule_index[1], priv->cfp.used);

	rule_index[0] = find_first_zero_bit(priv->cfp.used,
					    priv->num_cfp_rules);
	if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
		ret = -ENOSPC;
		goto out_err;
	}

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow)) {
		ret = PTR_ERR(flow);
		goto out_err;
	}
	flow_rule_match_ipv6_addrs(flow->rule, &ipv6);
	flow_rule_match_ports(flow->rule, &ports);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
	      ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv6 packets including
	 * UDF_Valid[8]
	 */
	reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	/* Mask all but valid UDFs */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

	/* Slice the IPv6 source address and port */
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
			       ports.key->src, slice_num, false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
			       ports.mask->src, SLICE_NUM_MASK, true);

	/* Insert into TCAM now because we need to insert a second rule */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port, port_num,
				      queue_num, false);
	if (ret)
		goto out_err_flow_rule;
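
	/* At this point the first half (source address and port) is live in
	 * the TCAM at rule_index[0], but since it was programmed with
	 * fwd_map_change == false it does not redirect anything by itself;
	 * only the chained second half below applies the destination map
	 * and queue override.
	 */
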
	/* Now deal with the second slice to chain this rule */
	slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Chained rule, source port match is coming from the rule we are
	 * chained from.
	 */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(7));
	core_writel(priv, 0, CORE_CFP_MASK_PORT(7));

	/* CHAIN ID		[31:24] chain to previous slice
	 * Reserved		[23:20]
	 * UDF_Valid[11:8]	[19:16]
	 * UDF_Valid[7:0]	[15:8]
	 * UDF_n_D11		[7:0]
	 */
	reg = rule_index[0] << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask all except chain ID, UDF Valid[8] and UDF Valid[7:0] */
	reg = XCESS_ADDR_MASK << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* Don't care */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(5));

	/* Mask all */
	core_writel(priv, 0, CORE_CFP_MASK_PORT(5));

	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
			       ports.key->dst, slice_num, false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
			       ports.mask->dst, SLICE_NUM_MASK, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now, set chain ID to
	 * the one we are chained to
	 */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the second half rule as being used now, return it as the
	 * location, and flag it as unique while dumping rules
	 */
	set_bit(rule_index[0], priv->cfp.used);
	set_bit(rule_index[1], priv->cfp.unique);
	fs->location = rule_index[1];

	return ret;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
out_err:
	clear_bit(rule_index[1], priv->cfp.used);
	return ret;
}
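
/* fs->ring_cookie encodes the destination as
 * port_num * SF2_NUM_EGRESS_QUEUES + queue_num, which the insertion path
 * below decodes with a division and a modulo.
 */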
static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
				   struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
	__u64 ring_cookie = fs->ring_cookie;
	unsigned int queue_num, port_num;
	int ret;

	/* This rule is a Wake-on-LAN filter and we must specifically
	 * target the CPU port in order for it to work.
	 */
	if (ring_cookie == RX_CLS_FLOW_WAKE)
		ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES;

	/* We do not support discarding packets, check that the
	 * destination port is enabled and that we are within the
	 * number of ports supported by the switch
	 */
	port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES;

	if (ring_cookie == RX_CLS_FLOW_DISC ||
	    !(dsa_is_user_port(ds, port_num) ||
	      dsa_is_cpu_port(ds, port_num)) ||
	    port_num >= priv->hw_params.num_ports)
		return -EINVAL;

	/* We have a small oddity where Port 6 just does not have a
	 * valid bit here (so we subtract by one).
	 */
	queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES;
	if (port_num >= 7)
		port_num -= 1;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule = NULL;
	int ret = -EINVAL;

	/* Check for unsupported extensions */
	if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
	    fs->m_ext.data[1]))
		return -EINVAL;

	if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
		return -EINVAL;

	if (fs->location != RX_CLS_LOC_ANY &&
	    test_bit(fs->location, priv->cfp.used))
		return -EBUSY;

	if (fs->location != RX_CLS_LOC_ANY &&
	    fs->location > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
	if (ret == 0)
		return -EEXIST;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = bcm_sf2_cfp_rule_insert(ds, port, fs);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->port = port;
	memcpy(&rule->fs, fs, sizeof(*fs));
	list_add_tail(&rule->next, &priv->cfp.rules_list);

	return ret;
}

static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
				    u32 loc, u32 *next_loc)
{
	int ret;
	u32 reg;

	/* Indicate which rule we want to read */
	bcm_sf2_cfp_rule_addr_set(priv, loc);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Check if this is possibly an IPv6 rule that would
	 * indicate we need to delete its companion rule
	 * as well
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
	if (next_loc)
		*next_loc = (reg >> 24) & CHAIN_ID_MASK;

	/* Clear its valid bits */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
	reg &= ~SLICE_VALID;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

	/* Write back this entry into the TCAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret)
		return ret;

	clear_bit(loc, priv->cfp.used);
	clear_bit(loc, priv->cfp.unique);

	return 0;
}
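
/* Deletion does not erase the TCAM word: bcm_sf2_cfp_rule_del_one() above
 * only clears the SLICE_VALID bits and writes the entry back, then releases
 * the index in the used/unique bitmaps so the slot can be reallocated.
 */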
static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port,
				   u32 loc)
{
	u32 next_loc = 0;
	int ret;

	ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
	if (ret)
		return ret;

	/* If this was an IPv6 rule, delete its companion rule too */
	if (next_loc)
		ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);

	return ret;
}

static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
{
	struct cfp_rule *rule;
	int ret;

	if (loc >= CFP_NUM_RULES)
		return -EINVAL;

	/* Refuse deleting unused rules, and those that are not unique since
	 * that could leave an IPv6 rule with one of its chained rules still
	 * in the table.
	 */
	if (!test_bit(loc, priv->cfp.unique) || loc == 0)
		return -EINVAL;

	rule = bcm_sf2_cfp_rule_find(priv, port, loc);
	if (!rule)
		return -EINVAL;

	ret = bcm_sf2_cfp_rule_remove(priv, port, loc);

	list_del(&rule->next);
	kfree(rule);

	return ret;
}

static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	unsigned int i;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xff;

	flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
	flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}

static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rxnfc *nfc)
{
	struct cfp_rule *rule;

	rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
	if (!rule)
		return -EINVAL;

	memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));

	bcm_sf2_invert_masks(&nfc->fs);

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);

	return 0;
}

/* Dump rule locations by walking the unique rules bitmap rather than
 * issuing a TCAM search operation
 */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
				    int port, struct ethtool_rxnfc *nfc,
				    u32 *rule_locs)
{
	unsigned int index = 1, rules_cnt = 0;

	for_each_set_bit_from(index, priv->cfp.unique, priv->num_cfp_rules) {
		rule_locs[rules_cnt] = index;
		rules_cnt++;
	}

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);
	nfc->rule_cnt = rules_cnt;

	return 0;
}

int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		/* Subtract the default, unusable rule */
		nfc->rule_cnt = bitmap_weight(priv->cfp.unique,
					      priv->num_cfp_rules) - 1;
		/* We support specifying rule locations */
		nfc->data |= RX_CLS_LOC_SPECIAL;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sf2_cfp_rule_get(priv, port, nfc);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device */
	if (p->ethtool_ops->get_rxnfc) {
		ret = p->ethtool_ops->get_rxnfc(p, nfc, rule_locs);
		if (ret == -EOPNOTSUPP)
			ret = 0;
	}

	return ret;
}
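
/* ethtool entry point for ETHTOOL_SRXCLSRLINS/SRXCLSRLDEL. The CFP lock
 * only covers the switch-side programming; the command is also passed to
 * the master network device, and rolled back here if the master rejects it.
 */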
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc)
{
	struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device.
	 * This can fail, so rollback the operation if we need to.
	 */
	if (p->ethtool_ops->set_rxnfc) {
		ret = p->ethtool_ops->set_rxnfc(p, nfc);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_lock(&priv->cfp.lock);
			bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
			mutex_unlock(&priv->cfp.lock);
		} else {
			ret = 0;
		}
	}

	return ret;
}

int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg |= TCAM_RESET;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & TCAM_RESET))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

void bcm_sf2_cfp_exit(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule, *n;

	if (list_empty(&priv->cfp.rules_list))
		return;

	list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next)
		bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location);
}

int bcm_sf2_cfp_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule;
	int ret = 0;
	u32 reg;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg &= ~CFP_EN_MAP_MASK;
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	ret = bcm_sf2_cfp_rst(priv);
	if (ret)
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = bcm_sf2_cfp_rule_remove(priv, rule->port,
					      rule->fs.location);
		if (ret) {
			dev_err(ds->dev, "failed to remove rule\n");
			return ret;
		}

		ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs);
		if (ret) {
			dev_err(ds->dev, "failed to restore rule\n");
			return ret;
		}
	}

	return ret;
}

static const struct bcm_sf2_cfp_stat {
	unsigned int offset;
	unsigned int ram_loc;
	const char *name;
} bcm_sf2_cfp_stats[] = {
	{
		.offset = CORE_STAT_GREEN_CNTR,
		.ram_loc = GREEN_STAT_RAM,
		.name = "Green"
	},
	{
		.offset = CORE_STAT_YELLOW_CNTR,
		.ram_loc = YELLOW_STAT_RAM,
		.name = "Yellow"
	},
	{
		.offset = CORE_STAT_RED_CNTR,
		.ram_loc = RED_STAT_RAM,
		.name = "Red"
	},
};
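
/* Statistics are laid out rule-major: for each CFP index in
 * 1..num_cfp_rules-1, the Green/Yellow/Red counters follow one another
 * (iter = (i - 1) * s + j), matching the string ordering generated below.
 */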
void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
			     u32 stringset, uint8_t *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
	char buf[ETH_GSTRING_LEN];
	unsigned int i, j, iter;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 1; i < priv->num_cfp_rules; i++) {
		for (j = 0; j < s; j++) {
			snprintf(buf, sizeof(buf),
				 "CFP%03d_%sCntr",
				 i, bcm_sf2_cfp_stats[j].name);
			iter = (i - 1) * s + j;
			strlcpy(data + iter * ETH_GSTRING_LEN,
				buf, ETH_GSTRING_LEN);
		}
	}
}

void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
				   uint64_t *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
	const struct bcm_sf2_cfp_stat *stat;
	unsigned int i, j, iter;
	struct cfp_rule *rule;
	int ret;

	mutex_lock(&priv->cfp.lock);
	for (i = 1; i < priv->num_cfp_rules; i++) {
		rule = bcm_sf2_cfp_rule_find(priv, port, i);
		if (!rule)
			continue;

		for (j = 0; j < s; j++) {
			stat = &bcm_sf2_cfp_stats[j];

			bcm_sf2_cfp_rule_addr_set(priv, i);
			ret = bcm_sf2_cfp_op(priv, stat->ram_loc | OP_SEL_READ);
			if (ret)
				continue;

			iter = (i - 1) * s + j;
			data[iter] = core_readl(priv, stat->offset);
		}
	}
	mutex_unlock(&priv->cfp.lock);
}

int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	if (sset != ETH_SS_STATS)
		return 0;

	/* 3 counters per CFP rule */
	return (priv->num_cfp_rules - 1) * ARRAY_SIZE(bcm_sf2_cfp_stats);
}