/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"

struct cfp_udf_slice_layout {
	u8 slices[UDFS_PER_SLICE];
	u32 mask_value;
	u32 base_offset;
};

struct cfp_udf_layout {
	struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];
};

static const u8 zero_slice[UDFS_PER_SLICE] = { };

/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
	.udfs = {
		[1] = {
			.slices = {
				/* End of L2, byte offset 12, src IP[0:15] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[16:31] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, dst IP[0:15] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, dst IP[16:31] */
				CFG_UDF_EOL2 | 9,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
				0, 0, 0
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
		},
	},
};

/* UDF slices layout for a TCPv6/UDPv6 specification */
static const struct cfp_udf_layout udf_tcpip6_layout = {
	.udfs = {
		[0] = {
			.slices = {
				/* End of L2, byte offset 8, src IP[0:15] */
				CFG_UDF_EOL2 | 4,
				/* End of L2, byte offset 10, src IP[16:31] */
				CFG_UDF_EOL2 | 5,
				/* End of L2, byte offset 12, src IP[32:47] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[48:63] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, src IP[64:79] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, src IP[80:95] */
				CFG_UDF_EOL2 | 9,
				/* End of L2, byte offset 20, src IP[96:111] */
				CFG_UDF_EOL2 | 10,
				/* End of L2, byte offset 22, src IP[112:127] */
				CFG_UDF_EOL2 | 11,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_B_0_8_PORT_0,
		},
		[3] = {
			.slices = {
				/* End of L2, byte offset 24, dst IP[0:15] */
				CFG_UDF_EOL2 | 12,
				/* End of L2, byte offset 26, dst IP[16:31] */
				CFG_UDF_EOL2 | 13,
				/* End of L2, byte offset 28, dst IP[32:47] */
				CFG_UDF_EOL2 | 14,
				/* End of L2, byte offset 30, dst IP[48:63] */
				CFG_UDF_EOL2 | 15,
				/* End of L2, byte offset 32, dst IP[64:79] */
				CFG_UDF_EOL2 | 16,
				/* End of L2, byte offset 34, dst IP[80:95] */
				CFG_UDF_EOL2 | 17,
				/* End of L2, byte offset 36, dst IP[96:111] */
				CFG_UDF_EOL2 | 18,
				/* End of L2, byte offset 38, dst IP[112:127] */
				CFG_UDF_EOL2 | 19,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_D_0_11_PORT_0,
		},
	},
};

static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
	unsigned int i, count = 0;

	for (i = 0; i < UDFS_PER_SLICE; i++) {
		if (layout[i] != 0)
			count++;
	}

	return count;
}
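
/* As the layout tables above show, each UDF matches a 16-bit word, so the
 * offset OR'd into a slice entry is in 2-byte units relative to its anchor
 * (e.g. CFG_UDF_EOL2 | 6 matches bytes 12-13 past the end of the L2 header).
 * The helpers below split a contiguous mask of num_udf valid bits into the
 * two fields the TCAM data expects: UDF_Valid[7:0] and UDF_Valid[8].
 */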
static inline u32 udf_upper_bits(unsigned int num_udf)
{
	return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
}

static inline u32 udf_lower_bits(unsigned int num_udf)
{
	return (u8)GENMASK(num_udf - 1, 0);
}

/* Find the first slice at or after @start with a non-empty UDF layout,
 * or return UDF_NUM_SLICES if there is none.
 */
static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
					     unsigned int start)
{
	const struct cfp_udf_slice_layout *slice_layout;
	unsigned int slice_idx;

	for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
		slice_layout = &l->udfs[slice_idx];
		if (memcmp(slice_layout->slices, zero_slice,
			   sizeof(zero_slice)))
			break;
	}

	return slice_idx;
}

static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
				const struct cfp_udf_layout *layout,
				unsigned int slice_num)
{
	u32 offset = layout->udfs[slice_num].base_offset;
	unsigned int i;

	for (i = 0; i < UDFS_PER_SLICE; i++)
		core_writel(priv, layout->udfs[slice_num].slices[i],
			    offset + i * 4);
}

/* Start the selected RAM operation and busy-wait for OP_STR_DONE to clear */
static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
	reg |= OP_STR_DONE | op;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & OP_STR_DONE))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
					     unsigned int addr)
{
	u32 reg;

	WARN_ON(addr >= priv->num_cfp_rules);

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
	reg |= addr << XCESS_ADDR_SHIFT;
	core_writel(priv, reg, CORE_CFP_ACC);
}

static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
	/* Entry #0 is reserved */
	return priv->num_cfp_rules - 1;
}
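
/* Programming model: rule contents are staged through the CORE_CFP_DATA/MASK
 * (or CORE_ACT_POL_DATA*) registers, the target entry is selected with
 * bcm_sf2_cfp_rule_addr_set(), and the transfer into TCAM, Action or Rate
 * Meter RAM is then kicked off and polled with bcm_sf2_cfp_op().
 */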
static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
				   unsigned int rule_index,
				   unsigned int port_num,
				   unsigned int queue_num,
				   bool fwd_map_change)
{
	int ret;
	u32 reg;

	/* Replace ARL derived destination with DST_MAP derived, define
	 * which port and queue this should be forwarded to.
	 */
	if (fwd_map_change)
		reg = CHANGE_FWRD_MAP_IB_REP_ARL |
		      BIT(port_num + DST_MAP_IB_SHIFT) |
		      CHANGE_TC | queue_num << NEW_TC_SHIFT;
	else
		reg = 0;

	core_writel(priv, reg, CORE_ACT_POL_DATA0);

	/* Set classification ID that needs to be put in Broadcom tag */
	core_writel(priv, rule_index << CHAIN_ID_SHIFT, CORE_ACT_POL_DATA1);

	core_writel(priv, 0, CORE_ACT_POL_DATA2);

	/* Configure policer RAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
	if (ret) {
		pr_err("Policer entry at %d failed\n", rule_index);
		return ret;
	}

	/* Disable the policer */
	core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

	/* Now the rate meter */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
	if (ret) {
		pr_err("Meter entry at %d failed\n", rule_index);
		return ret;
	}

	return 0;
}

static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
				   struct ethtool_tcpip4_spec *v4_spec,
				   unsigned int slice_num,
				   bool mask)
{
	u32 reg, offset;

	/* C-Tag		[31:24]
	 * UDF_n_A8		[23:8]
	 * UDF_n_A7		[7:0]
	 */
	reg = 0;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, reg, offset);

	/* UDF_n_A7		[31:24]
	 * UDF_n_A6		[23:8]
	 * UDF_n_A5		[7:0]
	 */
	reg = be16_to_cpu(v4_spec->pdst) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, reg, offset);

	/* UDF_n_A5		[31:24]
	 * UDF_n_A4		[23:8]
	 * UDF_n_A3		[7:0]
	 */
	reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
	      (u32)be16_to_cpu(v4_spec->psrc) << 8 |
	      (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, reg, offset);

	/* UDF_n_A3		[31:24]
	 * UDF_n_A2		[23:8]
	 * UDF_n_A1		[7:0]
	 */
	reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
	      (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
	      (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, reg, offset);

	/* UDF_n_A1		[31:24]
	 * UDF_n_A0		[23:8]
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
	      (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}
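
/* A TCPv4/UDPv4 tuple fits in a single slice (six UDFs), so one TCAM entry
 * and one rule index are enough for an IPv4 filter.
 */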
static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
	const struct cfp_udf_layout *layout;
	unsigned int slice_num, rule_index;
	u8 ip_proto, ip_frag;
	u8 num_udf;
	u32 reg;
	int ret;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ip_proto = IPPROTO_TCP;
		v4_spec = &fs->h_u.tcp_ip4_spec;
		v4_m_spec = &fs->m_u.tcp_ip4_spec;
		break;
	case UDP_V4_FLOW:
		ip_proto = IPPROTO_UDP;
		v4_spec = &fs->h_u.udp_ip4_spec;
		v4_m_spec = &fs->m_u.udp_ip4_spec;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = be32_to_cpu(fs->m_ext.data[0]);

	/* Locate the first rule available */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index = find_first_zero_bit(priv->cfp.used,
						 priv->num_cfp_rules);
	else
		rule_index = fs->location;

	if (rule_index > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	layout = &udf_tcpip4_layout;
	/* We only use one UDF slice for now */
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	core_writel(priv, v4_spec->tos << IPTOS_SHIFT |
		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
		    udf_upper_bits(num_udf),
		    CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv4 packets */
	core_writel(priv, layout->udfs[slice_num].mask_value |
		    udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	/* Mask all but valid UDFs */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

	/* Program the match and the mask */
	bcm_sf2_cfp_slice_ipv4(priv, v4_spec, slice_num, false);
	bcm_sf2_cfp_slice_ipv4(priv, v4_m_spec, SLICE_NUM_MASK, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index);
		return ret;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port_num,
				      queue_num, true);
	if (ret)
		return ret;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the rule as being used and return it */
	set_bit(rule_index, priv->cfp.used);
	set_bit(rule_index, priv->cfp.unique);
	fs->location = rule_index;

	return 0;
}
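
/* The TCPv6/UDPv6 tuple does not fit in a single slice: the source half
 * (address plus port) and the destination half each consume a full slice of
 * nine UDFs, so an IPv6 filter is split across two chained TCAM entries.
 */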
static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
				   const __be32 *ip6_addr, const __be16 port,
				   unsigned int slice_num,
				   bool mask)
{
	u32 reg, tmp, val, offset;

	/* C-Tag		[31:24]
	 * UDF_n_B8		[23:8]	(port)
	 * UDF_n_B7 (upper)	[7:0]	(addr[15:8])
	 */
	reg = be32_to_cpu(ip6_addr[3]);
	val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, val, offset);

	/* UDF_n_B7 (lower)	[31:24] (addr[7:0])
	 * UDF_n_B6		[23:8]	(addr[31:16])
	 * UDF_n_B5 (upper)	[7:0]	(addr[47:40])
	 */
	tmp = be32_to_cpu(ip6_addr[2]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, val, offset);

	/* UDF_n_B5 (lower)	[31:24] (addr[39:32])
	 * UDF_n_B4		[23:8]	(addr[63:48])
	 * UDF_n_B3 (upper)	[7:0]	(addr[79:72])
	 */
	reg = be32_to_cpu(ip6_addr[1]);
	val = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, val, offset);

	/* UDF_n_B3 (lower)	[31:24] (addr[71:64])
	 * UDF_n_B2		[23:8]	(addr[95:80])
	 * UDF_n_B1 (upper)	[7:0]	(addr[111:104])
	 */
	tmp = be32_to_cpu(ip6_addr[0]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, val, offset);

	/* UDF_n_B1 (lower)	[31:24] (addr[103:96])
	 * UDF_n_B0		[23:8]	(addr[127:112])
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}

static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec;
	unsigned int slice_num, rule_index[2];
	const struct cfp_udf_layout *layout;
	u8 ip_proto, ip_frag;
	int ret = 0;
	u8 num_udf;
	u32 reg;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V6_FLOW:
		ip_proto = IPPROTO_TCP;
		v6_spec = &fs->h_u.tcp_ip6_spec;
		v6_m_spec = &fs->m_u.tcp_ip6_spec;
		break;
	case UDP_V6_FLOW:
		ip_proto = IPPROTO_UDP;
		v6_spec = &fs->h_u.udp_ip6_spec;
		v6_m_spec = &fs->m_u.udp_ip6_spec;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = be32_to_cpu(fs->m_ext.data[0]);

	layout = &udf_tcpip6_layout;
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Negotiate two indexes, one for the second half which we are chained
	 * from, which is what we will return to user-space, and a second one
	 * which is used to store its first half. That first half does not
	 * allow any choice of placement, so it just needs to find the next
	 * available bit. We return the second half as fs->location because
	 * that helps with the rule lookup later on: since the second half is
	 * chained from its first half, we can easily identify IPv6 CFP rules
	 * by checking whether they carry a CHAIN_ID.
	 *
	 * We also want the second half to have a lower rule_index than its
	 * first half because the HW search is by incrementing addresses.
	 */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index[1] = find_first_zero_bit(priv->cfp.used,
						    priv->num_cfp_rules);
	else
		rule_index[1] = fs->location;
	if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	/* Flag it as used (cleared on error path) such that we can immediately
	 * obtain a second one to chain from.
	 */
	set_bit(rule_index[1], priv->cfp.used);

	rule_index[0] = find_first_zero_bit(priv->cfp.used,
					    priv->num_cfp_rules);
	if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
		ret = -ENOSPC;
		goto out_err;
	}

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
	      ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv6 packets including
	 * UDF_Valid[8]
	 */
	reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	/* Mask all but valid UDFs */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

	/* Slice the IPv6 source address and port */
	bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
			       slice_num, false);
	bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
			       SLICE_NUM_MASK, true);

	/* Insert into TCAM now because we need to insert a second rule */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
		goto out_err;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,
				      queue_num, false);
	if (ret)
		goto out_err;

	/* Now deal with the second slice to chain this rule */
	slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Chained rule, source port match is coming from the rule we are
	 * chained from.
	 */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(7));
	core_writel(priv, 0, CORE_CFP_MASK_PORT(7));

	/* CHAIN ID		[31:24] chain to previous slice
	 * Reserved		[23:20]
	 * UDF_Valid[11:8]	[19:16]
	 * UDF_Valid[7:0]	[15:8]
	 * UDF_n_D11		[7:0]
	 */
	reg = rule_index[0] << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask all except chain ID, UDF Valid[8] and UDF Valid[7:0] */
	reg = XCESS_ADDR_MASK << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* Don't care */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(5));

	/* Mask all */
	core_writel(priv, 0, CORE_CFP_MASK_PORT(5));

	bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6dst, v6_spec->pdst, slice_num,
			       false);
	bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6dst, v6_m_spec->pdst,
			       SLICE_NUM_MASK, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
		goto out_err;
	}

	/* Insert into Action and policer RAMs now, set chain ID to
	 * the one we are chained to
	 */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,
				      queue_num, true);
	if (ret)
		goto out_err;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the second half rule as being used now, return it as the
	 * location, and flag it as unique while dumping rules
	 */
	set_bit(rule_index[0], priv->cfp.used);
	set_bit(rule_index[1], priv->cfp.unique);
	fs->location = rule_index[1];

	return ret;

out_err:
	clear_bit(rule_index[1], priv->cfp.used);
	return ret;
}
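
/* The destination in fs->ring_cookie encodes both a switch port and an
 * egress queue: port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES and
 * queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES, mirroring how
 * bcm_sf2_cfp_rule_get() reconstructs the cookie on readback.
 */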
static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = ds->ports[port].cpu_dp->index;
	__u64 ring_cookie = fs->ring_cookie;
	unsigned int queue_num, port_num;
	int ret = -EINVAL;

	/* Check for unsupported extensions */
	if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
	     fs->m_ext.data[1]))
		return -EINVAL;

	if (fs->location != RX_CLS_LOC_ANY &&
	    test_bit(fs->location, priv->cfp.used))
		return -EBUSY;

	if (fs->location != RX_CLS_LOC_ANY &&
	    fs->location > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	/* This rule is a Wake-on-LAN filter and we must specifically
	 * target the CPU port in order for it to work.
	 */
	if (ring_cookie == RX_CLS_FLOW_WAKE)
		ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES;

	/* We do not support discarding packets, check that the
	 * destination port is enabled and that we are within the
	 * number of ports supported by the switch
	 */
	port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES;

	if (ring_cookie == RX_CLS_FLOW_DISC ||
	    !(dsa_is_user_port(ds, port_num) ||
	      dsa_is_cpu_port(ds, port_num)) ||
	    port_num >= priv->hw_params.num_ports)
		return -EINVAL;

	/* We have a small oddity where Port 6 just does not have a
	 * valid bit here (so we subtract by one).
	 */
	queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES;
	if (port_num >= 7)
		port_num -= 1;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	default:
		break;
	}

	return ret;
}

static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
				    u32 loc, u32 *next_loc)
{
	int ret;
	u32 reg;

	/* Indicate which rule we want to read */
	bcm_sf2_cfp_rule_addr_set(priv, loc);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Check if this is possibly an IPv6 rule that would
	 * indicate we need to delete its companion rule
	 * as well
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
	if (next_loc)
		*next_loc = (reg >> 24) & CHAIN_ID_MASK;

	/* Clear its valid bits */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
	reg &= ~SLICE_VALID;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

	/* Write back this entry into the TCAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret)
		return ret;

	clear_bit(loc, priv->cfp.used);
	clear_bit(loc, priv->cfp.unique);

	return 0;
}

static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
				u32 loc)
{
	u32 next_loc = 0;
	int ret;

	/* Refuse deleting unused rules, and those that are not unique since
	 * that could leave IPv6 rules with one of the chained rules left in
	 * the table.
	 */
	if (!test_bit(loc, priv->cfp.unique) || loc == 0)
		return -EINVAL;

	ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
	if (ret)
		return ret;

	/* If this was an IPv6 rule, delete its companion rule too */
	if (next_loc)
		ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);

	return ret;
}

static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	unsigned int i;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xff;

	flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
	flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}
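
/* Reverse of bcm_sf2_cfp_slice_ipv4(): reassemble the IPv4 addresses and
 * ports of a rule from the DATA/MASK port registers just read back from
 * the TCAM.
 */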
static int bcm_sf2_cfp_unslice_ipv4(struct bcm_sf2_priv *priv,
				    struct ethtool_tcpip4_spec *v4_spec,
				    bool mask)
{
	u32 reg, offset, ipv4;
	u16 src_dst_port;

	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);

	reg = core_readl(priv, offset);
	/* src port [15:8] */
	src_dst_port = reg << 8;

	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);

	reg = core_readl(priv, offset);
	/* src port [7:0] */
	src_dst_port |= (reg >> 24);

	v4_spec->pdst = cpu_to_be16(src_dst_port);
	v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));

	/* IPv4 dst [15:8] */
	ipv4 = (reg & 0xff) << 8;

	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);

	reg = core_readl(priv, offset);
	/* IPv4 dst [31:16] */
	ipv4 |= ((reg >> 8) & 0xffff) << 16;
	/* IPv4 dst [7:0] */
	ipv4 |= (reg >> 24) & 0xff;
	v4_spec->ip4dst = cpu_to_be32(ipv4);

	/* IPv4 src [15:8] */
	ipv4 = (reg & 0xff) << 8;

	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	reg = core_readl(priv, offset);

	/* Once the TCAM is programmed, the mask reflects the slice number
	 * being matched, don't bother checking it when reading back the
	 * mask spec
	 */
	if (!mask && !(reg & SLICE_VALID))
		return -EINVAL;

	/* IPv4 src [7:0] */
	ipv4 |= (reg >> 24) & 0xff;
	/* IPv4 src [31:16] */
	ipv4 |= ((reg >> 8) & 0xffff) << 16;
	v4_spec->ip4src = cpu_to_be32(ipv4);

	return 0;
}

static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *v4_spec = NULL, *v4_m_spec = NULL;
	u32 reg;
	int ret;

	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));

	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
	case IPPROTO_TCP:
		fs->flow_type = TCP_V4_FLOW;
		v4_spec = &fs->h_u.tcp_ip4_spec;
		v4_m_spec = &fs->m_u.tcp_ip4_spec;
		break;
	case IPPROTO_UDP:
		fs->flow_type = UDP_V4_FLOW;
		v4_spec = &fs->h_u.udp_ip4_spec;
		v4_m_spec = &fs->m_u.udp_ip4_spec;
		break;
	default:
		return -EINVAL;
	}

	fs->m_ext.data[0] = cpu_to_be32((reg >> IP_FRAG_SHIFT) & 1);
	v4_spec->tos = (reg >> IPTOS_SHIFT) & IPTOS_MASK;

	ret = bcm_sf2_cfp_unslice_ipv4(priv, v4_spec, false);
	if (ret)
		return ret;

	return bcm_sf2_cfp_unslice_ipv4(priv, v4_m_spec, true);
}
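
/* Reverse of bcm_sf2_cfp_slice_ipv6(): rebuild one half of the IPv6 tuple
 * (one address plus one port) from the DATA/MASK port registers of a single
 * TCAM entry.
 */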
static int bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv,
				    __be32 *ip6_addr, __be16 *port,
				    bool mask)
{
	u32 reg, tmp, offset;

	/* C-Tag		[31:24]
	 * UDF_n_B8		[23:8]	(port)
	 * UDF_n_B7 (upper)	[7:0]	(addr[15:8])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	reg = core_readl(priv, offset);
	*port = cpu_to_be32(reg) >> 8;
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B7 (lower)	[31:24] (addr[7:0])
	 * UDF_n_B6		[23:8]	(addr[31:16])
	 * UDF_n_B5 (upper)	[7:0]	(addr[47:40])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[3] = cpu_to_be32(tmp);
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B5 (lower)	[31:24] (addr[39:32])
	 * UDF_n_B4		[23:8]	(addr[63:48])
	 * UDF_n_B3 (upper)	[7:0]	(addr[79:72])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[2] = cpu_to_be32(tmp);
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B3 (lower)	[31:24] (addr[71:64])
	 * UDF_n_B2		[23:8]	(addr[95:80])
	 * UDF_n_B1 (upper)	[7:0]	(addr[111:104])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[1] = cpu_to_be32(tmp);
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B1 (lower)	[31:24] (addr[103:96])
	 * UDF_n_B0		[23:8]	(addr[127:112])
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[0] = cpu_to_be32(tmp);

	if (!mask && !(reg & SLICE_VALID))
		return -EINVAL;

	return 0;
}

static int bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv, int port,
				     struct ethtool_rx_flow_spec *fs,
				     u32 next_loc)
{
	struct ethtool_tcpip6_spec *v6_spec = NULL, *v6_m_spec = NULL;
	u32 reg;
	int ret;

	/* UDPv6 and TCPv6 both use ethtool_tcpip6_spec, so it is safe to
	 * use tcp_ip6_spec here since h_u/m_u are unions.
	 */
	v6_spec = &fs->h_u.tcp_ip6_spec;
	v6_m_spec = &fs->m_u.tcp_ip6_spec;

	/* Read the second half first */
	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6dst, &v6_spec->pdst,
				       false);
	if (ret)
		return ret;

	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6dst,
				       &v6_m_spec->pdst, true);
	if (ret)
		return ret;

	/* Read last to avoid next entry clobbering the results during search
	 * operations. We would not have the port enabled for this rule, so
	 * don't bother checking it.
	 */
	(void)core_readl(priv, CORE_CFP_DATA_PORT(7));

	/* The slice number is valid, so read the rule we are chained from now
	 * which is our first half.
	 */
	bcm_sf2_cfp_rule_addr_set(priv, next_loc);
	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));

	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
	case IPPROTO_TCP:
		fs->flow_type = TCP_V6_FLOW;
		break;
	case IPPROTO_UDP:
		fs->flow_type = UDP_V6_FLOW;
		break;
	default:
		return -EINVAL;
	}

	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6src, &v6_spec->psrc,
				       false);
	if (ret)
		return ret;

	return bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6src,
					&v6_m_spec->psrc, true);
}
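
/* Read back one rule: the Action RAM supplies the destination port and
 * queue, then the TCAM entry tells us whether this is an IPv4 rule (the
 * byte at L3_FRAMING_SHIFT reads zero) or the second half of a chained
 * IPv6 rule (a non-zero CHAIN_ID).
 */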
static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rxnfc *nfc)
{
	u32 reg, ipv4_or_chain_id;
	unsigned int queue_num;
	int ret;

	bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
	if (ret)
		return ret;

	reg = core_readl(priv, CORE_ACT_POL_DATA0);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Extract the destination port */
	nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
				  DST_MAP_IB_MASK) - 1;

	/* There is no Port 6, so we compensate for that here */
	if (nfc->fs.ring_cookie >= 6)
		nfc->fs.ring_cookie++;
	nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES;

	/* Extract the destination queue */
	queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
	nfc->fs.ring_cookie += queue_num;

	/* Extract the L3_FRAMING or CHAIN_ID */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));

	/* With IPv6 rules this would contain a non-zero chain ID since
	 * we reserve entry 0 and it cannot be used. So if we read 0 here
	 * this means an IPv4 rule.
	 */
	ipv4_or_chain_id = (reg >> L3_FRAMING_SHIFT) & 0xff;
	if (ipv4_or_chain_id == 0)
		ret = bcm_sf2_cfp_ipv4_rule_get(priv, port, &nfc->fs);
	else
		ret = bcm_sf2_cfp_ipv6_rule_get(priv, port, &nfc->fs,
						ipv4_or_chain_id);
	if (ret)
		return ret;

	/* Read last to avoid next entry clobbering the results during search
	 * operations
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
	if (!(reg & 1 << port))
		return -EINVAL;

	bcm_sf2_invert_masks(&nfc->fs);

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);

	return 0;
}

/* We implement the search doing a TCAM search operation */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
				    int port, struct ethtool_rxnfc *nfc,
				    u32 *rule_locs)
{
	unsigned int index = 1, rules_cnt = 0;

	for_each_set_bit_from(index, priv->cfp.unique, priv->num_cfp_rules) {
		rule_locs[rules_cnt] = index;
		rules_cnt++;
	}

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);
	nfc->rule_cnt = rules_cnt;

	return 0;
}

int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		/* Subtract the default, unusable rule */
		nfc->rule_cnt = bitmap_weight(priv->cfp.unique,
					      priv->num_cfp_rules) - 1;
		/* We support specifying rule locations */
		nfc->data |= RX_CLS_LOC_SPECIAL;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sf2_cfp_rule_get(priv, port, nfc);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device */
	if (p->ethtool_ops->get_rxnfc) {
		ret = p->ethtool_ops->get_rxnfc(p, nfc, rule_locs);
		if (ret == -EOPNOTSUPP)
			ret = 0;
	}

	return ret;
}

int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
		break;

	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device.
	 * This can fail, so rollback the operation if we need to.
	 */
	if (p->ethtool_ops->set_rxnfc) {
		ret = p->ethtool_ops->set_rxnfc(p, nfc);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_lock(&priv->cfp.lock);
			bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
			mutex_unlock(&priv->cfp.lock);
		} else {
			ret = 0;
		}
	}

	return ret;
}

int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg |= TCAM_RESET;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & TCAM_RESET))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}