/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>
#include <net/flow_offload.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"

struct cfp_rule {
	int port;
	struct ethtool_rx_flow_spec fs;
	struct list_head next;
};

struct cfp_udf_slice_layout {
	u8 slices[UDFS_PER_SLICE];
	u32 mask_value;
	u32 base_offset;
};

struct cfp_udf_layout {
	struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];
};

static const u8 zero_slice[UDFS_PER_SLICE] = { };

/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
	.udfs = {
		[1] = {
			.slices = {
				/* End of L2, byte offset 12, src IP[0:15] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[16:31] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, dst IP[0:15] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, dst IP[16:31] */
				CFG_UDF_EOL2 | 9,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
				0, 0, 0
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
		},
	},
};

/* UDF slices layout for a TCPv6/UDPv6 specification */
static const struct cfp_udf_layout udf_tcpip6_layout = {
	.udfs = {
		[0] = {
			.slices = {
				/* End of L2, byte offset 8, src IP[0:15] */
				CFG_UDF_EOL2 | 4,
				/* End of L2, byte offset 10, src IP[16:31] */
				CFG_UDF_EOL2 | 5,
				/* End of L2, byte offset 12, src IP[32:47] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[48:63] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, src IP[64:79] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, src IP[80:95] */
				CFG_UDF_EOL2 | 9,
				/* End of L2, byte offset 20, src IP[96:111] */
				CFG_UDF_EOL2 | 10,
				/* End of L2, byte offset 22, src IP[112:127] */
				CFG_UDF_EOL2 | 11,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_B_0_8_PORT_0,
		},
		[3] = {
			.slices = {
				/* End of L2, byte offset 24, dst IP[0:15] */
				CFG_UDF_EOL2 | 12,
				/* End of L2, byte offset 26, dst IP[16:31] */
				CFG_UDF_EOL2 | 13,
				/* End of L2, byte offset 28, dst IP[32:47] */
				CFG_UDF_EOL2 | 14,
				/* End of L2, byte offset 30, dst IP[48:63] */
				CFG_UDF_EOL2 | 15,
				/* End of L2, byte offset 32, dst IP[64:79] */
				CFG_UDF_EOL2 | 16,
				/* End of L2, byte offset 34, dst IP[80:95] */
				CFG_UDF_EOL2 | 17,
				/* End of L2, byte offset 36, dst IP[96:111] */
				CFG_UDF_EOL2 | 18,
				/* End of L2, byte offset 38, dst IP[112:127] */
				CFG_UDF_EOL2 | 19,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_D_0_11_PORT_0,
		},
	},
};

static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
	unsigned int i, count = 0;

	for (i = 0; i < UDFS_PER_SLICE; i++) {
		if (layout[i] != 0)
			count++;
	}

	return count;
}

static inline u32 udf_upper_bits(unsigned int num_udf)
{
	return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
}

static inline u32 udf_lower_bits(unsigned int num_udf)
{
	return (u8)GENMASK(num_udf - 1, 0);
}
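
/* Worked example for the two helpers above (plain arithmetic, for
 * illustration only): with UDFS_PER_SLICE == 9, a fully populated slice
 * (num_udf == 9) gives udf_upper_bits(9) == GENMASK(8, 0) >> 8 == 0x1,
 * i.e. UDF_Valid[8], and udf_lower_bits(9) == (u8)0x1ff == 0xff, i.e.
 * UDF_Valid[7:0]. The IPv4 layout only populates 6 UDFs, so
 * udf_upper_bits(6) == 0x0 and udf_lower_bits(6) == 0x3f.
 */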

static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
					     unsigned int start)
{
	const struct cfp_udf_slice_layout *slice_layout;
	unsigned int slice_idx;

	for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
		slice_layout = &l->udfs[slice_idx];
		if (memcmp(slice_layout->slices, zero_slice,
			   sizeof(zero_slice)))
			break;
	}

	return slice_idx;
}

static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
				const struct cfp_udf_layout *layout,
				unsigned int slice_num)
{
	u32 offset = layout->udfs[slice_num].base_offset;
	unsigned int i;

	for (i = 0; i < UDFS_PER_SLICE; i++)
		core_writel(priv, layout->udfs[slice_num].slices[i],
			    offset + i * 4);
}

static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
	reg |= OP_STR_DONE | op;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & OP_STR_DONE))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
					     unsigned int addr)
{
	u32 reg;

	WARN_ON(addr >= priv->num_cfp_rules);

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
	reg |= addr << XCESS_ADDR_SHIFT;
	core_writel(priv, reg, CORE_CFP_ACC);
}

static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
	/* Entry #0 is reserved */
	return priv->num_cfp_rules - 1;
}
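
/* Sizing example (illustrative; num_cfp_rules is per-SoC, e.g. 256 on
 * some parts): with 256 TCAM entries, entry #0 being reserved leaves
 * usable rule locations 1..255, and bcm_sf2_cfp_rule_size() returns 255.
 */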

static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
				   unsigned int rule_index,
				   int src_port,
				   unsigned int port_num,
				   unsigned int queue_num,
				   bool fwd_map_change)
{
	int ret;
	u32 reg;

	/* Replace ARL derived destination with DST_MAP derived, define
	 * which port and queue this should be forwarded to.
	 */
	if (fwd_map_change)
		reg = CHANGE_FWRD_MAP_IB_REP_ARL |
		      BIT(port_num + DST_MAP_IB_SHIFT) |
		      CHANGE_TC | queue_num << NEW_TC_SHIFT;
	else
		reg = 0;

	/* Enable looping back to the original port */
	if (src_port == port_num)
		reg |= LOOP_BK_EN;

	core_writel(priv, reg, CORE_ACT_POL_DATA0);

	/* Set classification ID that needs to be put in Broadcom tag */
	core_writel(priv, rule_index << CHAIN_ID_SHIFT, CORE_ACT_POL_DATA1);

	core_writel(priv, 0, CORE_ACT_POL_DATA2);

	/* Configure policer RAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
	if (ret) {
		pr_err("Policer entry at %d failed\n", rule_index);
		return ret;
	}

	/* Disable the policer */
	core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

	/* Now the rate meter */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
	if (ret) {
		pr_err("Meter entry at %d failed\n", rule_index);
		return ret;
	}

	return 0;
}

static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
				   struct flow_dissector_key_ipv4_addrs *addrs,
				   struct flow_dissector_key_ports *ports,
				   unsigned int slice_num,
				   bool mask)
{
	u32 reg, offset;

	/* C-Tag		[31:24]
	 * UDF_n_A8		[23:8]
	 * UDF_n_A7		[7:0]
	 */
	reg = 0;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, reg, offset);

	/* UDF_n_A7		[31:24]
	 * UDF_n_A6		[23:8]
	 * UDF_n_A5		[7:0]
	 */
	reg = be16_to_cpu(ports->dst) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, reg, offset);

	/* UDF_n_A5		[31:24]
	 * UDF_n_A4		[23:8]
	 * UDF_n_A3		[7:0]
	 */
	reg = (be16_to_cpu(ports->dst) & 0xff) << 24 |
	      (u32)be16_to_cpu(ports->src) << 8 |
	      (be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, reg, offset);

	/* UDF_n_A3		[31:24]
	 * UDF_n_A2		[23:8]
	 * UDF_n_A1		[7:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->dst) >> 16) << 8 |
	      (be32_to_cpu(addrs->src) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, reg, offset);

	/* UDF_n_A1		[31:24]
	 * UDF_n_A0		[23:8]
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->src) >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}
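
/* Packing example for bcm_sf2_cfp_slice_ipv4() above (illustration
 * only): each UDF is a 16-bit word, but consecutive CORE_CFP_DATA_PORT()
 * registers overlap by one byte, so fields straddle register boundaries.
 * With a destination port of 0xabcd, be16_to_cpu(ports->dst) >> 8 ==
 * 0xab lands in DATA_PORT(3) bits [7:0] (upper half of UDF_n_A5), while
 * (0xabcd & 0xff) << 24 == 0xcd000000 lands in DATA_PORT(2) bits [31:24]
 * (lower half of UDF_n_A5).
 */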

static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_rx_flow_spec_input input = {};
	const struct cfp_udf_layout *layout;
	unsigned int slice_num, rule_index;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv4_addrs ipv4;
	struct flow_match_ports ports;
	struct flow_match_ip ip;
	u8 ip_proto, ip_frag;
	u8 num_udf;
	u32 reg;
	int ret;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V4_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = be32_to_cpu(fs->m_ext.data[0]);

	/* Locate the first rule available */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index = find_first_zero_bit(priv->cfp.used,
						 priv->num_cfp_rules);
	else
		rule_index = fs->location;

	if (rule_index > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
	flow_rule_match_ports(flow->rule, &ports);
	flow_rule_match_ip(flow->rule, &ip);

	layout = &udf_tcpip4_layout;
	/* We only use one UDF slice for now */
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	core_writel(priv, ip.key->tos << IPTOS_SHIFT |
		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
		    udf_upper_bits(num_udf),
		    CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv4 packets */
	core_writel(priv, layout->udfs[slice_num].mask_value |
		    udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	/* Mask all but valid UDFs */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

	/* Program the match and the mask */
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, slice_num, false);
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, SLICE_NUM_MASK, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the rule as being used and return it */
	set_bit(rule_index, priv->cfp.used);
	set_bit(rule_index, priv->cfp.unique);
	fs->location = rule_index;

	/* The flow rule translation is only needed while programming the
	 * TCAM, action and policer RAMs, so release it on the success path
	 * as well, not just on errors.
	 */
	ethtool_rx_flow_rule_destroy(flow);

	return 0;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
	return ret;
}
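
/* Example usage (hypothetical interface name): steer TCPv4 traffic for
 * 192.168.1.10:80 to egress queue 2 of switch port 1 with:
 *
 *	ethtool -N sw0p0 flow-type tcp4 dst-ip 192.168.1.10 dst-port 80 \
 *		action 10
 *
 * where, assuming SF2_NUM_EGRESS_QUEUES == 8, "action 10" decodes to
 * port 1, queue 2 (see bcm_sf2_cfp_rule_insert() below).
 */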

static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
				   const __be32 *ip6_addr, const __be16 port,
				   unsigned int slice_num,
				   bool mask)
{
	u32 reg, tmp, val, offset;

	/* C-Tag		[31:24]
	 * UDF_n_B8		[23:8]	(port)
	 * UDF_n_B7 (upper)	[7:0]	(addr[15:8])
	 */
	reg = be32_to_cpu(ip6_addr[3]);
	val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, val, offset);

	/* UDF_n_B7 (lower)	[31:24]	(addr[7:0])
	 * UDF_n_B6		[23:8]	(addr[31:16])
	 * UDF_n_B5 (upper)	[7:0]	(addr[47:40])
	 */
	tmp = be32_to_cpu(ip6_addr[2]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, val, offset);

	/* UDF_n_B5 (lower)	[31:24]	(addr[39:32])
	 * UDF_n_B4		[23:8]	(addr[63:48])
	 * UDF_n_B3 (upper)	[7:0]	(addr[79:72])
	 */
	reg = be32_to_cpu(ip6_addr[1]);
	val = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, val, offset);

	/* UDF_n_B3 (lower)	[31:24]	(addr[71:64])
	 * UDF_n_B2		[23:8]	(addr[95:80])
	 * UDF_n_B1 (upper)	[7:0]	(addr[111:104])
	 */
	tmp = be32_to_cpu(ip6_addr[0]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, val, offset);

	/* UDF_n_B1 (lower)	[31:24]	(addr[103:96])
	 * UDF_n_B0		[23:8]	(addr[127:112])
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}
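
/* Note on usage, derived from the UDF layouts above: an IPv6 match does
 * not fit in a single slice, so bcm_sf2_cfp_slice_ipv6() is called twice
 * per rule: once with the source address and port against UDF set B
 * (slice 0 of udf_tcpip6_layout), and once with the destination address
 * and port against UDF set D (slice 3), the two TCAM entries being
 * chained together through a CHAIN_ID.
 */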

static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
					      int port, u32 location)
{
	struct cfp_rule *rule;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		if (rule->port == port && rule->fs.location == location)
			return rule;
	}

	/* Return NULL (not a pointer into the list head) when not found */
	return NULL;
}

static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct cfp_rule *rule = NULL;
	size_t fs_size = 0;
	int ret = 1;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = 1;
		if (rule->port != port)
			continue;

		if (rule->fs.flow_type != fs->flow_type ||
		    rule->fs.ring_cookie != fs->ring_cookie ||
		    rule->fs.m_ext.data[0] != fs->m_ext.data[0])
			continue;

		switch (fs->flow_type & ~FLOW_EXT) {
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
			fs_size = sizeof(struct ethtool_tcpip6_spec);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
			fs_size = sizeof(struct ethtool_tcpip4_spec);
			break;
		default:
			continue;
		}

		ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
		ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
		if (ret == 0)
			break;
	}

	return ret;
}

static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_rx_flow_spec_input input = {};
	unsigned int slice_num, rule_index[2];
	const struct cfp_udf_layout *layout;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv6_addrs ipv6;
	struct flow_match_ports ports;
	u8 ip_proto, ip_frag;
	int ret = 0;
	u8 num_udf;
	u32 reg;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V6_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V6_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = be32_to_cpu(fs->m_ext.data[0]);

	layout = &udf_tcpip6_layout;
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Negotiate two indexes, one for the second half which we are chained
	 * from, which is what we will return to user-space, and a second one
	 * which is used to store its first half. That first half does not
	 * allow any choice of placement, so it just needs to find the next
	 * available bit. We return the second half as fs->location because
	 * that helps with the rule lookup later on; since the second half is
	 * chained from its first half, we can easily identify IPv6 CFP rules
	 * by checking whether they carry a CHAIN_ID.
	 *
	 * We also want the second half to have a lower rule_index than its
	 * first half because the HW search is by incrementing addresses.
	 */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index[1] = find_first_zero_bit(priv->cfp.used,
						    priv->num_cfp_rules);
	else
		rule_index[1] = fs->location;
	if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	/* Flag it as used (cleared on error path) such that we can immediately
	 * obtain a second one to chain from.
	 */
	set_bit(rule_index[1], priv->cfp.used);
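
	/* Example (assuming fs->location == RX_CLS_LOC_ANY and locations
	 * 1 and 2 are both free): rule_index[1] = 1 holds the second half
	 * and is returned as fs->location, while rule_index[0] = 2 holds
	 * the first half, so the second half sits at the lower TCAM
	 * address, as required above.
	 */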

	rule_index[0] = find_first_zero_bit(priv->cfp.used,
					    priv->num_cfp_rules);
	if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
		ret = -ENOSPC;
		goto out_err;
	}

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow)) {
		ret = PTR_ERR(flow);
		goto out_err;
	}

	flow_rule_match_ipv6_addrs(flow->rule, &ipv6);
	flow_rule_match_ports(flow->rule, &ports);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
	      ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv6 packets including
	 * UDF_Valid[8]
	 */
	reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	/* Mask all but valid UDFs */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

	/* Slice the IPv6 source address and port */
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
			       ports.key->src, slice_num, false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
			       ports.mask->src, SLICE_NUM_MASK, true);

	/* Insert into TCAM now because we need to insert a second rule */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port, port_num,
				      queue_num, false);
	if (ret)
		goto out_err_flow_rule;

	/* Now deal with the second slice to chain this rule */
	slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Chained rule, source port match is coming from the rule we are
	 * chained from.
	 */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(7));
	core_writel(priv, 0, CORE_CFP_MASK_PORT(7));

	/* CHAIN ID		[31:24]	chain to previous slice
	 * Reserved		[23:20]
	 * UDF_Valid[11:8]	[19:16]
	 * UDF_Valid[7:0]	[15:8]
	 * UDF_n_D11		[7:0]
	 */
	reg = rule_index[0] << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask all except chain ID, UDF Valid[8] and UDF Valid[7:0] */
	reg = XCESS_ADDR_MASK << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* Don't care */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(5));

	/* Mask all */
	core_writel(priv, 0, CORE_CFP_MASK_PORT(5));

	/* Slice the IPv6 destination address and port; note that the mask
	 * call must use ports.mask->dst, not ports.key->dst.
	 */
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
			       ports.key->dst, slice_num, false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
			       ports.mask->dst, SLICE_NUM_MASK, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now, set chain ID to
	 * the one we are chained to
	 */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the second half rule as being used now, return it as the
	 * location, and flag it as unique while dumping rules
	 */
	set_bit(rule_index[0], priv->cfp.used);
	set_bit(rule_index[1], priv->cfp.unique);
	fs->location = rule_index[1];

	/* We are done with the flow rule translation */
	ethtool_rx_flow_rule_destroy(flow);

	return ret;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
out_err:
	clear_bit(rule_index[1], priv->cfp.used);
	return ret;
}
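
/* Example usage (hypothetical interface name): an equivalent IPv6 rule,
 *
 *	ethtool -N sw0p0 flow-type tcp6 dst-ip 2001:db8::1 dst-port 443 \
 *		action 8
 *
 * consumes two chained TCAM entries as negotiated above; the location
 * reported back to user-space is that of the second half.
 */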

static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
				   struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = ds->ports[port].cpu_dp->index;
	__u64 ring_cookie = fs->ring_cookie;
	unsigned int queue_num, port_num;
	int ret;

	/* This rule is a Wake-on-LAN filter and we must specifically
	 * target the CPU port in order for it to be working.
	 */
	if (ring_cookie == RX_CLS_FLOW_WAKE)
		ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES;

	/* We do not support discarding packets, check that the
	 * destination port is enabled and that we are within the
	 * number of ports supported by the switch
	 */
	port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES;

	if (ring_cookie == RX_CLS_FLOW_DISC ||
	    !(dsa_is_user_port(ds, port_num) ||
	      dsa_is_cpu_port(ds, port_num)) ||
	    port_num >= priv->hw_params.num_ports)
		return -EINVAL;

	/* We have a small oddity where Port 6 just does not have a
	 * valid bit here (so we subtract by one).
	 */
	queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES;
	if (port_num >= 7)
		port_num -= 1;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
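
/* Worked example for the ring_cookie decoding above (illustrative
 * values, assuming SF2_NUM_EGRESS_QUEUES == 8): "action 19" gives
 * port_num = 19 / 8 = 2 and queue_num = 19 % 8 = 3, i.e. egress queue 3
 * of switch port 2. Port 7 (actions 56..63) is remapped to bit position
 * 6 because of the Port 6 oddity noted above.
 */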

static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule = NULL;
	int ret = -EINVAL;

	/* Check for unsupported extensions */
	if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
	    fs->m_ext.data[1]))
		return -EINVAL;

	if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
		return -EINVAL;

	if (fs->location != RX_CLS_LOC_ANY &&
	    test_bit(fs->location, priv->cfp.used))
		return -EBUSY;

	if (fs->location != RX_CLS_LOC_ANY &&
	    fs->location > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
	if (ret == 0)
		return -EEXIST;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = bcm_sf2_cfp_rule_insert(ds, port, fs);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->port = port;
	memcpy(&rule->fs, fs, sizeof(*fs));
	list_add_tail(&rule->next, &priv->cfp.rules_list);

	return ret;
}

static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
				    u32 loc, u32 *next_loc)
{
	int ret;
	u32 reg;

	/* Indicate which rule we want to read */
	bcm_sf2_cfp_rule_addr_set(priv, loc);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Check if this is possibly an IPv6 rule that would
	 * indicate we need to delete its companion rule
	 * as well
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
	if (next_loc)
		*next_loc = (reg >> 24) & CHAIN_ID_MASK;

	/* Clear its valid bits */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
	reg &= ~SLICE_VALID;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

	/* Write back this entry into the TCAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret)
		return ret;

	clear_bit(loc, priv->cfp.used);
	clear_bit(loc, priv->cfp.unique);

	return 0;
}

static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port,
				   u32 loc)
{
	u32 next_loc = 0;
	int ret;

	ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
	if (ret)
		return ret;

	/* If this was an IPv6 rule, delete its companion rule too */
	if (next_loc)
		ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);

	return ret;
}

static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
{
	struct cfp_rule *rule;
	int ret;

	if (loc >= CFP_NUM_RULES)
		return -EINVAL;

	/* Refuse deleting unused rules, and those that are not unique since
	 * that could leave an IPv6 rule with only one of its chained halves
	 * in the table.
	 */
	if (!test_bit(loc, priv->cfp.unique) || loc == 0)
		return -EINVAL;

	rule = bcm_sf2_cfp_rule_find(priv, port, loc);
	if (!rule)
		return -EINVAL;

	ret = bcm_sf2_cfp_rule_remove(priv, port, loc);

	list_del(&rule->next);
	kfree(rule);

	return ret;
}
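
/* Example of the deletion path above: deleting an IPv6 rule installed at
 * location 1 reads back its TCAM entry, finds its first half's index
 * (e.g. 2) as the CHAIN_ID in DATA_PORT(6) bits [31:24], and therefore
 * clears both entry 1 and entry 2 in a single ETHTOOL_SRXCLSRLDEL call.
 */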

static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	unsigned int i;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xff;

	flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
	flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}

static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rxnfc *nfc)
{
	struct cfp_rule *rule;

	rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
	if (!rule)
		return -EINVAL;

	memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));

	bcm_sf2_invert_masks(&nfc->fs);

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);

	return 0;
}

/* Locate all rules by walking the bitmap of unique (user-visible) rules,
 * no TCAM search operation is needed.
 */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
				    int port, struct ethtool_rxnfc *nfc,
				    u32 *rule_locs)
{
	unsigned int index = 1, rules_cnt = 0;

	for_each_set_bit_from(index, priv->cfp.unique, priv->num_cfp_rules) {
		rule_locs[rules_cnt] = index;
		rules_cnt++;
	}

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);
	nfc->rule_cnt = rules_cnt;

	return 0;
}

int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		/* Subtract the default, unusable rule */
		nfc->rule_cnt = bitmap_weight(priv->cfp.unique,
					      priv->num_cfp_rules) - 1;
		/* We support specifying rule locations */
		nfc->data |= RX_CLS_LOC_SPECIAL;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sf2_cfp_rule_get(priv, port, nfc);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device */
	if (p->ethtool_ops->get_rxnfc) {
		ret = p->ethtool_ops->get_rxnfc(p, nfc, rule_locs);
		if (ret == -EOPNOTSUPP)
			ret = 0;
	}

	return ret;
}

int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device.
	 * This can fail, so rollback the operation if we need to.
	 */
	if (p->ethtool_ops->set_rxnfc) {
		ret = p->ethtool_ops->set_rxnfc(p, nfc);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_lock(&priv->cfp.lock);
			bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
			mutex_unlock(&priv->cfp.lock);
		} else {
			ret = 0;
		}
	}

	return ret;
}

int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg |= TCAM_RESET;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & TCAM_RESET))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

void bcm_sf2_cfp_exit(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule, *n;

	if (list_empty(&priv->cfp.rules_list))
		return;

	list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next)
		bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location);
}

int bcm_sf2_cfp_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule;
	int ret = 0;
	u32 reg;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg &= ~CFP_EN_MAP_MASK;
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	ret = bcm_sf2_cfp_rst(priv);
	if (ret)
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = bcm_sf2_cfp_rule_remove(priv, rule->port,
					      rule->fs.location);
		if (ret) {
			dev_err(ds->dev, "failed to remove rule\n");
			return ret;
		}

		ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs);
		if (ret) {
			dev_err(ds->dev, "failed to restore rule\n");
			return ret;
		}
	}

	return ret;
}

static const struct bcm_sf2_cfp_stat {
	unsigned int offset;
	unsigned int ram_loc;
	const char *name;
} bcm_sf2_cfp_stats[] = {
	{
		.offset = CORE_STAT_GREEN_CNTR,
		.ram_loc = GREEN_STAT_RAM,
		.name = "Green"
	},
	{
		.offset = CORE_STAT_YELLOW_CNTR,
		.ram_loc = YELLOW_STAT_RAM,
		.name = "Yellow"
	},
	{
		.offset = CORE_STAT_RED_CNTR,
		.ram_loc = RED_STAT_RAM,
		.name = "Red"
	},
};
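
/* Statistics strings are emitted below as CFP<location>_<color>Cntr:
 * the rule at location 1, for example, exposes "CFP001_GreenCntr",
 * "CFP001_YellowCntr" and "CFP001_RedCntr".
 */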

void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
			     u32 stringset, uint8_t *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
	char buf[ETH_GSTRING_LEN];
	unsigned int i, j, iter;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 1; i < priv->num_cfp_rules; i++) {
		for (j = 0; j < s; j++) {
			snprintf(buf, sizeof(buf),
				 "CFP%03d_%sCntr",
				 i, bcm_sf2_cfp_stats[j].name);
			iter = (i - 1) * s + j;
			strlcpy(data + iter * ETH_GSTRING_LEN,
				buf, ETH_GSTRING_LEN);
		}
	}
}

void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
				   uint64_t *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
	const struct bcm_sf2_cfp_stat *stat;
	unsigned int i, j, iter;
	struct cfp_rule *rule;
	int ret;

	mutex_lock(&priv->cfp.lock);
	for (i = 1; i < priv->num_cfp_rules; i++) {
		rule = bcm_sf2_cfp_rule_find(priv, port, i);
		if (!rule)
			continue;

		for (j = 0; j < s; j++) {
			stat = &bcm_sf2_cfp_stats[j];

			bcm_sf2_cfp_rule_addr_set(priv, i);
			ret = bcm_sf2_cfp_op(priv, stat->ram_loc | OP_SEL_READ);
			if (ret)
				continue;

			iter = (i - 1) * s + j;
			data[iter] = core_readl(priv, stat->offset);
		}
	}
	mutex_unlock(&priv->cfp.lock);
}

int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	if (sset != ETH_SS_STATS)
		return 0;

	/* 3 counters per CFP rule */
	return (priv->num_cfp_rules - 1) * ARRAY_SIZE(bcm_sf2_cfp_stats);
}