/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"

struct cfp_udf_slice_layout {
	u8 slices[UDFS_PER_SLICE];
	u32 mask_value;
	u32 base_offset;
};

struct cfp_udf_layout {
	struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];
};

static const u8 zero_slice[UDFS_PER_SLICE] = { };

/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
	.udfs = {
		[1] = {
			.slices = {
				/* End of L2, byte offset 12, src IP[0:15] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[16:31] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, dst IP[0:15] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, dst IP[16:31] */
				CFG_UDF_EOL2 | 9,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
				0, 0, 0
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
		},
	},
};

/* UDF slices layout for a TCPv6/UDPv6 specification */
static const struct cfp_udf_layout udf_tcpip6_layout = {
	.udfs = {
		[0] = {
			.slices = {
				/* End of L2, byte offset 8, src IP[0:15] */
				CFG_UDF_EOL2 | 4,
				/* End of L2, byte offset 10, src IP[16:31] */
				CFG_UDF_EOL2 | 5,
				/* End of L2, byte offset 12, src IP[32:47] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[48:63] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, src IP[64:79] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, src IP[80:95] */
				CFG_UDF_EOL2 | 9,
				/* End of L2, byte offset 20, src IP[96:111] */
				CFG_UDF_EOL2 | 10,
				/* End of L2, byte offset 22, src IP[112:127] */
				CFG_UDF_EOL2 | 11,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_B_0_8_PORT_0,
		},
		[3] = {
			.slices = {
				/* End of L2, byte offset 24, dst IP[0:15] */
				CFG_UDF_EOL2 | 12,
				/* End of L2, byte offset 26, dst IP[16:31] */
				CFG_UDF_EOL2 | 13,
				/* End of L2, byte offset 28, dst IP[32:47] */
				CFG_UDF_EOL2 | 14,
				/* End of L2, byte offset 30, dst IP[48:63] */
				CFG_UDF_EOL2 | 15,
				/* End of L2, byte offset 32, dst IP[64:79] */
				CFG_UDF_EOL2 | 16,
				/* End of L2, byte offset 34, dst IP[80:95] */
				CFG_UDF_EOL2 | 17,
				/* End of L2, byte offset 36, dst IP[96:111] */
				CFG_UDF_EOL2 | 18,
				/* End of L2, byte offset 38, dst IP[112:127] */
				CFG_UDF_EOL2 | 19,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_D_0_11_PORT_0,
		},
	},
};

static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
	unsigned int i, count = 0;

	for (i = 0; i < UDFS_PER_SLICE; i++) {
		if (layout[i] != 0)
			count++;
	}

	return count;
}
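
/* Note on the UDF encodings above: each UDF extracts one 16-bit word from
 * the packet, and the value OR'd into CFG_UDF_EOL2/EOL3 is a halfword
 * (2-byte) offset from that anchor. For example, CFG_UDF_EOL2 | 6 reads the
 * halfword at byte offset 6 * 2 = 12 past the end of the L2 header, which
 * for IPv4 is the upper half of the source address, matching the per-slice
 * comments.
 */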

static inline u32 udf_upper_bits(unsigned int num_udf)
{
	return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
}

static inline u32 udf_lower_bits(unsigned int num_udf)
{
	return (u8)GENMASK(num_udf - 1, 0);
}
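
/* Worked example (UDFS_PER_SLICE being 9 here, per the slice arrays above):
 * the IPv6 source slice uses num_udf = 9 UDFs, so GENMASK(8, 0) = 0x1ff;
 * udf_lower_bits() returns 0xff (UDF_Valid[7:0]) and udf_upper_bits()
 * returns 0x1 (UDF_Valid[8]). The IPv4 slice uses 6 UDFs: the lower bits
 * are 0x3f and the upper bits are 0.
 */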

static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
					     unsigned int start)
{
	const struct cfp_udf_slice_layout *slice_layout;
	unsigned int slice_idx;

	for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
		slice_layout = &l->udfs[slice_idx];
		if (memcmp(slice_layout->slices, zero_slice,
			   sizeof(zero_slice)))
			break;
	}

	return slice_idx;
}

static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
				const struct cfp_udf_layout *layout,
				unsigned int slice_num)
{
	u32 offset = layout->udfs[slice_num].base_offset;
	unsigned int i;

	for (i = 0; i < UDFS_PER_SLICE; i++)
		core_writel(priv, layout->udfs[slice_num].slices[i],
			    offset + i * 4);
}

static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
	reg |= OP_STR_DONE | op;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & OP_STR_DONE))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
					     unsigned int addr)
{
	u32 reg;

	WARN_ON(addr >= priv->num_cfp_rules);

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
	reg |= addr << XCESS_ADDR_SHIFT;
	core_writel(priv, reg, CORE_CFP_ACC);
}

static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
	/* Entry #0 is reserved */
	return priv->num_cfp_rules - 1;
}

static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
				   unsigned int rule_index,
				   unsigned int port_num,
				   unsigned int queue_num,
				   bool fwd_map_change)
{
	int ret;
	u32 reg;

	/* Replace ARL derived destination with DST_MAP derived, define
	 * which port and queue this should be forwarded to.
	 */
	if (fwd_map_change)
		reg = CHANGE_FWRD_MAP_IB_REP_ARL |
		      BIT(port_num + DST_MAP_IB_SHIFT) |
		      CHANGE_TC | queue_num << NEW_TC_SHIFT;
	else
		reg = 0;

	core_writel(priv, reg, CORE_ACT_POL_DATA0);

	/* Set classification ID that needs to be put in Broadcom tag */
	core_writel(priv, rule_index << CHAIN_ID_SHIFT, CORE_ACT_POL_DATA1);

	core_writel(priv, 0, CORE_ACT_POL_DATA2);

	/* Configure policer RAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
	if (ret) {
		pr_err("Policer entry at %d failed\n", rule_index);
		return ret;
	}

	/* Disable the policer */
	core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

	/* Now the rate meter */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
	if (ret) {
		pr_err("Meter entry at %d failed\n", rule_index);
		return ret;
	}

	return 0;
}

static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
				   struct ethtool_tcpip4_spec *v4_spec,
				   unsigned int slice_num,
				   bool mask)
{
	u32 reg, offset;

	/* C-Tag		[31:24]
	 * UDF_n_A8		[23:8]
	 * UDF_n_A7		[7:0]
	 */
	reg = 0;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, reg, offset);

	/* UDF_n_A7		[31:24]
	 * UDF_n_A6		[23:8]
	 * UDF_n_A5		[7:0]
	 */
	reg = be16_to_cpu(v4_spec->pdst) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, reg, offset);

	/* UDF_n_A5		[31:24]
	 * UDF_n_A4		[23:8]
	 * UDF_n_A3		[7:0]
	 */
	reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
	      (u32)be16_to_cpu(v4_spec->psrc) << 8 |
	      (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, reg, offset);

	/* UDF_n_A3		[31:24]
	 * UDF_n_A2		[23:8]
	 * UDF_n_A1		[7:0]
	 */
	reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
	      (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
	      (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, reg, offset);

	/* UDF_n_A1		[31:24]
	 * UDF_n_A0		[23:8]
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
	      (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}
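
/* Worked example for the packing above (illustrative values): a dst port of
 * 8080 (0x1f90) puts 0x1f in DATA_PORT(3) bits [7:0] (upper half of
 * UDF_n_A5) and 0x90 in DATA_PORT(2) bits [31:24] (lower half of UDF_n_A5).
 * UDFs are packed back to back, so a 16-bit field can straddle two of the
 * 32-bit TCAM words, which is why each write above mixes pieces of several
 * fields.
 */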

static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
	const struct cfp_udf_layout *layout;
	unsigned int slice_num, rule_index;
	u8 ip_proto, ip_frag;
	u8 num_udf;
	u32 reg;
	int ret;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ip_proto = IPPROTO_TCP;
		v4_spec = &fs->h_u.tcp_ip4_spec;
		v4_m_spec = &fs->m_u.tcp_ip4_spec;
		break;
	case UDP_V4_FLOW:
		ip_proto = IPPROTO_UDP;
		v4_spec = &fs->h_u.udp_ip4_spec;
		v4_m_spec = &fs->m_u.udp_ip4_spec;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = be32_to_cpu(fs->m_ext.data[0]);

	/* Locate the first rule available */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index = find_first_zero_bit(priv->cfp.used,
						 priv->num_cfp_rules);
	else
		rule_index = fs->location;

	if (rule_index > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	layout = &udf_tcpip4_layout;
	/* We only use one UDF slice for now */
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	core_writel(priv, v4_spec->tos << IPTOS_SHIFT |
		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
		    udf_upper_bits(num_udf),
		    CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv4 packets */
	core_writel(priv, layout->udfs[slice_num].mask_value |
		    udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	/* Mask all but valid UDFs */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

	/* Program the match and the mask */
	bcm_sf2_cfp_slice_ipv4(priv, v4_spec, slice_num, false);
	bcm_sf2_cfp_slice_ipv4(priv, v4_m_spec, SLICE_NUM_MASK, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index);
		return ret;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port_num,
				      queue_num, true);
	if (ret)
		return ret;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the rule as being used and return it */
	set_bit(rule_index, priv->cfp.used);
	set_bit(rule_index, priv->cfp.unique);
	fs->location = rule_index;

	return 0;
}
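
/* An IPv6 key does not fit in a single slice: the source address and port
 * are matched through one slice (udfs[0] of udf_tcpip6_layout) while the
 * destination address and port go through another (udfs[3]), the two TCAM
 * entries being tied together with a CHAIN_ID; see
 * bcm_sf2_cfp_ipv6_rule_set() below.
 */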

static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
				   const __be32 *ip6_addr, const __be16 port,
				   unsigned int slice_num,
				   bool mask)
{
	u32 reg, tmp, val, offset;

	/* C-Tag		[31:24]
	 * UDF_n_B8		[23:8]	(port)
	 * UDF_n_B7 (upper)	[7:0]	(addr[15:8])
	 */
	reg = be32_to_cpu(ip6_addr[3]);
	val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, val, offset);

	/* UDF_n_B7 (lower)	[31:24]	(addr[7:0])
	 * UDF_n_B6		[23:8]	(addr[31:16])
	 * UDF_n_B5 (upper)	[7:0]	(addr[47:40])
	 */
	tmp = be32_to_cpu(ip6_addr[2]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, val, offset);

	/* UDF_n_B5 (lower)	[31:24]	(addr[39:32])
	 * UDF_n_B4		[23:8]	(addr[63:48])
	 * UDF_n_B3 (upper)	[7:0]	(addr[79:72])
	 */
	reg = be32_to_cpu(ip6_addr[1]);
	val = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, val, offset);

	/* UDF_n_B3 (lower)	[31:24]	(addr[71:64])
	 * UDF_n_B2		[23:8]	(addr[95:80])
	 * UDF_n_B1 (upper)	[7:0]	(addr[111:104])
	 */
	tmp = be32_to_cpu(ip6_addr[0]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, val, offset);

	/* UDF_n_B1 (lower)	[31:24]	(addr[103:96])
	 * UDF_n_B0		[23:8]	(addr[127:112])
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}
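
/* The rolling reg/tmp pair above implements the same straddling as the IPv4
 * variant: the low byte of each 32-bit address word spills into bits [31:24]
 * of the next TCAM word. For instance, with port 80 (0x0050), DATA_PORT(4)
 * carries 0x0050 in bits [23:8] and addr[15:8] in bits [7:0].
 */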

static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec;
	unsigned int slice_num, rule_index[2];
	const struct cfp_udf_layout *layout;
	u8 ip_proto, ip_frag;
	int ret = 0;
	u8 num_udf;
	u32 reg;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V6_FLOW:
		ip_proto = IPPROTO_TCP;
		v6_spec = &fs->h_u.tcp_ip6_spec;
		v6_m_spec = &fs->m_u.tcp_ip6_spec;
		break;
	case UDP_V6_FLOW:
		ip_proto = IPPROTO_UDP;
		v6_spec = &fs->h_u.udp_ip6_spec;
		v6_m_spec = &fs->m_u.udp_ip6_spec;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = be32_to_cpu(fs->m_ext.data[0]);

	layout = &udf_tcpip6_layout;
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Negotiate two indexes, one for the second half which we are chained
	 * from, which is what we will return to user-space, and a second one
	 * which is used to store its first half. That first half does not
	 * allow any choice of placement, so it just needs to find the next
	 * available bit. We return the second half as fs->location because
	 * that helps with the rule lookup later on: since the second half is
	 * chained from its first half, we can easily identify IPv6 CFP rules
	 * by checking whether they carry a CHAIN_ID.
	 *
	 * We also want the second half to have a lower rule_index than its
	 * first half because the HW search is by incrementing addresses.
	 */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index[1] = find_first_zero_bit(priv->cfp.used,
						    priv->num_cfp_rules);
	else
		rule_index[1] = fs->location;
	if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	/* Flag it as used (cleared on error path) such that we can immediately
	 * obtain a second one to chain from.
	 */
	set_bit(rule_index[1], priv->cfp.used);

	rule_index[0] = find_first_zero_bit(priv->cfp.used,
					    priv->num_cfp_rules);
	if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
		ret = -ENOSPC;
		goto out_err;
	}

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
	      ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv6 packets including
	 * UDF_Valid[8]
	 */
	reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	/* Mask all but valid UDFs */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

	/* Slice the IPv6 source address and port */
	bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
			       slice_num, false);
	bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
			       SLICE_NUM_MASK, true);

	/* Insert into TCAM now because we need to insert a second rule */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
		goto out_err;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,
				      queue_num, false);
	if (ret)
		goto out_err;

	/* Now deal with the second slice to chain this rule */
	slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Chained rule, source port match is coming from the rule we are
	 * chained from.
	 */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(7));
	core_writel(priv, 0, CORE_CFP_MASK_PORT(7));

	/* CHAIN ID		[31:24]	chain to previous slice
	 * Reserved		[23:20]
	 * UDF_Valid[11:8]	[19:16]
	 * UDF_Valid[7:0]	[15:8]
	 * UDF_n_D11		[7:0]
	 */
	reg = rule_index[0] << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask all except chain ID, UDF Valid[8] and UDF Valid[7:0] */
	reg = XCESS_ADDR_MASK << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* Don't care */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(5));

	/* Mask all */
	core_writel(priv, 0, CORE_CFP_MASK_PORT(5));

	bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6dst, v6_spec->pdst, slice_num,
			       false);
	bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6dst, v6_m_spec->pdst,
			       SLICE_NUM_MASK, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
		goto out_err;
	}

	/* Insert into Action and policer RAMs now, set chain ID to
	 * the one we are chained to
	 */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,
				      queue_num, true);
	if (ret)
		goto out_err;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the second half rule as being used now, return it as the
	 * location, and flag it as unique while dumping rules
	 */
	set_bit(rule_index[0], priv->cfp.used);
	set_bit(rule_index[1], priv->cfp.unique);
	fs->location = rule_index[1];

	return ret;

out_err:
	clear_bit(rule_index[1], priv->cfp.used);
	return ret;
}
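
/* The ethtool ring_cookie encodes both the egress port and the queue:
 * ring_cookie = port_num * SF2_NUM_EGRESS_QUEUES + queue_num, as decoded
 * below. Assuming SF2_NUM_EGRESS_QUEUES is 8, an "action 18" from
 * user-space means egress port 2, queue 2.
 */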

static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int queue_num, port_num;
	int ret = -EINVAL;

	/* Check for unsupported extensions */
	if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
	     fs->m_ext.data[1]))
		return -EINVAL;

	if (fs->location != RX_CLS_LOC_ANY &&
	    test_bit(fs->location, priv->cfp.used))
		return -EBUSY;

	if (fs->location != RX_CLS_LOC_ANY &&
	    fs->location > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	/* We do not support discarding packets, check that the
	 * destination port is enabled and that we are within the
	 * number of ports supported by the switch
	 */
	port_num = fs->ring_cookie / SF2_NUM_EGRESS_QUEUES;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC ||
	    !dsa_is_user_port(ds, port_num) ||
	    port_num >= priv->hw_params.num_ports)
		return -EINVAL;

	/* We have a small oddity where Port 6 just does not have a
	 * valid bit here (so we subtract by one).
	 */
	queue_num = fs->ring_cookie % SF2_NUM_EGRESS_QUEUES;
	if (port_num >= 7)
		port_num -= 1;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	default:
		break;
	}

	return ret;
}
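
/* For reference, a rule is typically installed from user-space with ethtool,
 * e.g. (hypothetical interface and values):
 *	ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.1 dst-port 80 \
 *		action 18 loc 5
 * which reaches bcm_sf2_cfp_rule_set() through ETHTOOL_SRXCLSRLINS.
 */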

static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
				    u32 loc, u32 *next_loc)
{
	int ret;
	u32 reg;

	/* Indicate which rule we want to read */
	bcm_sf2_cfp_rule_addr_set(priv, loc);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Check if this is possibly an IPv6 rule that would
	 * indicate we need to delete its companion rule
	 * as well
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
	if (next_loc)
		*next_loc = (reg >> 24) & CHAIN_ID_MASK;

	/* Clear its valid bits */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
	reg &= ~SLICE_VALID;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

	/* Write back this entry into the TCAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret)
		return ret;

	clear_bit(loc, priv->cfp.used);
	clear_bit(loc, priv->cfp.unique);

	return 0;
}

static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
{
	u32 next_loc = 0;
	int ret;

	/* Refuse deleting unused rules, and those that are not unique since
	 * that could leave IPv6 rules with one of their chained rules in the
	 * table.
	 */
	if (!test_bit(loc, priv->cfp.unique) || loc == 0)
		return -EINVAL;

	ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
	if (ret)
		return ret;

	/* If this was an IPv6 rule, delete its companion rule too */
	if (next_loc)
		ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);

	return ret;
}
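
/* Note that deletion does not erase the TCAM entry: clearing SLICE_VALID is
 * enough to make it stop matching, and the location is freed in the
 * used/unique bitmaps so a later insertion can overwrite it.
 */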

static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	unsigned int i;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xff;

	flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
	flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}

static int bcm_sf2_cfp_unslice_ipv4(struct bcm_sf2_priv *priv,
				    struct ethtool_tcpip4_spec *v4_spec,
				    bool mask)
{
	u32 reg, offset, ipv4;
	u16 src_dst_port;

	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);

	reg = core_readl(priv, offset);
	/* dst port [15:8] */
	src_dst_port = reg << 8;

	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);

	reg = core_readl(priv, offset);
	/* dst port [7:0] */
	src_dst_port |= (reg >> 24);

	v4_spec->pdst = cpu_to_be16(src_dst_port);
	v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));

	/* IPv4 dst [15:8] */
	ipv4 = (reg & 0xff) << 8;

	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);

	reg = core_readl(priv, offset);
	/* IPv4 dst [31:16] */
	ipv4 |= ((reg >> 8) & 0xffff) << 16;
	/* IPv4 dst [7:0] */
	ipv4 |= (reg >> 24) & 0xff;
	v4_spec->ip4dst = cpu_to_be32(ipv4);

	/* IPv4 src [15:8] */
	ipv4 = (reg & 0xff) << 8;

	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	reg = core_readl(priv, offset);

	/* Once the TCAM is programmed, the mask reflects the slice number
	 * being matched; don't bother checking it when reading back the
	 * mask spec.
	 */
	if (!mask && !(reg & SLICE_VALID))
		return -EINVAL;

	/* IPv4 src [7:0] */
	ipv4 |= (reg >> 24) & 0xff;
	/* IPv4 src [31:16] */
	ipv4 |= ((reg >> 8) & 0xffff) << 16;
	v4_spec->ip4src = cpu_to_be32(ipv4);

	return 0;
}

static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *v4_spec = NULL, *v4_m_spec = NULL;
	u32 reg;
	int ret;

	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));

	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
	case IPPROTO_TCP:
		fs->flow_type = TCP_V4_FLOW;
		v4_spec = &fs->h_u.tcp_ip4_spec;
		v4_m_spec = &fs->m_u.tcp_ip4_spec;
		break;
	case IPPROTO_UDP:
		fs->flow_type = UDP_V4_FLOW;
		v4_spec = &fs->h_u.udp_ip4_spec;
		v4_m_spec = &fs->m_u.udp_ip4_spec;
		break;
	default:
		return -EINVAL;
	}

	fs->m_ext.data[0] = cpu_to_be32((reg >> IP_FRAG_SHIFT) & 1);
	v4_spec->tos = (reg >> IPTOS_SHIFT) & IPTOS_MASK;

	ret = bcm_sf2_cfp_unslice_ipv4(priv, v4_spec, false);
	if (ret)
		return ret;

	return bcm_sf2_cfp_unslice_ipv4(priv, v4_m_spec, true);
}
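
/* The unslice helpers are the exact inverse of the slice helpers above:
 * they reassemble the flow spec from the same TCAM word layout. Only the
 * data read (mask == false) checks SLICE_VALID, since the mask half of the
 * entry carries the slice number instead.
 */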

static int bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv,
				    __be32 *ip6_addr, __be16 *port,
				    bool mask)
{
	u32 reg, tmp, offset;

	/* C-Tag		[31:24]
	 * UDF_n_B8		[23:8]	(port)
	 * UDF_n_B7 (upper)	[7:0]	(addr[15:8])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	reg = core_readl(priv, offset);
	/* The port sits in bits [23:8]; the conversion leaves it in network
	 * order once truncated to 16 bits.
	 */
	*port = cpu_to_be32(reg) >> 8;
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B7 (lower)	[31:24]	(addr[7:0])
	 * UDF_n_B6		[23:8]	(addr[31:16])
	 * UDF_n_B5 (upper)	[7:0]	(addr[47:40])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[3] = cpu_to_be32(tmp);
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B5 (lower)	[31:24]	(addr[39:32])
	 * UDF_n_B4		[23:8]	(addr[63:48])
	 * UDF_n_B3 (upper)	[7:0]	(addr[79:72])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[2] = cpu_to_be32(tmp);
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B3 (lower)	[31:24]	(addr[71:64])
	 * UDF_n_B2		[23:8]	(addr[95:80])
	 * UDF_n_B1 (upper)	[7:0]	(addr[111:104])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[1] = cpu_to_be32(tmp);
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B1 (lower)	[31:24]	(addr[103:96])
	 * UDF_n_B0		[23:8]	(addr[127:112])
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[0] = cpu_to_be32(tmp);

	if (!mask && !(reg & SLICE_VALID))
		return -EINVAL;

	return 0;
}

static int bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv, int port,
				     struct ethtool_rx_flow_spec *fs,
				     u32 next_loc)
{
	struct ethtool_tcpip6_spec *v6_spec = NULL, *v6_m_spec = NULL;
	u32 reg;
	int ret;

	/* UDPv6 and TCPv6 both use ethtool_tcpip6_spec, so we are fine
	 * using tcp_ip6_spec here; the union members share one layout.
	 */
	v6_spec = &fs->h_u.tcp_ip6_spec;
	v6_m_spec = &fs->m_u.tcp_ip6_spec;

	/* Read the second half first */
	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6dst, &v6_spec->pdst,
				       false);
	if (ret)
		return ret;

	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6dst,
				       &v6_m_spec->pdst, true);
	if (ret)
		return ret;

	/* Read last to avoid next entry clobbering the results during search
	 * operations. We would not have the port enabled for this rule, so
	 * don't bother checking it.
	 */
	(void)core_readl(priv, CORE_CFP_DATA_PORT(7));

	/* The slice number is valid, so read the rule we are chained from now
	 * which is our first half.
	 */
	bcm_sf2_cfp_rule_addr_set(priv, next_loc);
	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));

	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
	case IPPROTO_TCP:
		fs->flow_type = TCP_V6_FLOW;
		break;
	case IPPROTO_UDP:
		fs->flow_type = UDP_V6_FLOW;
		break;
	default:
		return -EINVAL;
	}

	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6src, &v6_spec->psrc,
				       false);
	if (ret)
		return ret;

	return bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6src,
					&v6_m_spec->psrc, true);
}
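
/* For reference, programmed rules are read back from user-space with e.g.
 * (hypothetical interface) "ethtool -u eth0" or "ethtool -u eth0 rule 5",
 * which land in bcm_sf2_cfp_rule_get_all() and bcm_sf2_cfp_rule_get() below
 * through ETHTOOL_GRXCLSRLALL/ETHTOOL_GRXCLSRULE.
 */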

static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rxnfc *nfc)
{
	u32 reg, ipv4_or_chain_id;
	unsigned int queue_num;
	int ret;

	bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
	if (ret)
		return ret;

	reg = core_readl(priv, CORE_ACT_POL_DATA0);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Extract the destination port */
	nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
				  DST_MAP_IB_MASK) - 1;

	/* There is no Port 6, so we compensate for that here */
	if (nfc->fs.ring_cookie >= 6)
		nfc->fs.ring_cookie++;
	nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES;

	/* Extract the destination queue */
	queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
	nfc->fs.ring_cookie += queue_num;

	/* Extract the L3_FRAMING or CHAIN_ID */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));

	/* With IPv6 rules this would contain a non-zero chain ID since
	 * we reserve entry 0 and it cannot be used. So if we read 0 here
	 * this means an IPv4 rule.
	 */
	ipv4_or_chain_id = (reg >> L3_FRAMING_SHIFT) & 0xff;
	if (ipv4_or_chain_id == 0)
		ret = bcm_sf2_cfp_ipv4_rule_get(priv, port, &nfc->fs);
	else
		ret = bcm_sf2_cfp_ipv6_rule_get(priv, port, &nfc->fs,
						ipv4_or_chain_id);
	if (ret)
		return ret;

	/* Read last to avoid next entry clobbering the results during search
	 * operations
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
	if (!(reg & 1 << port))
		return -EINVAL;

	bcm_sf2_invert_masks(&nfc->fs);

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);

	return 0;
}

/* We implement the search by walking the "unique" rules bitmap rather than
 * issuing a TCAM search operation.
 */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
				    int port, struct ethtool_rxnfc *nfc,
				    u32 *rule_locs)
{
	unsigned int index = 1, rules_cnt = 0;

	for_each_set_bit_from(index, priv->cfp.unique, priv->num_cfp_rules) {
		rule_locs[rules_cnt] = index;
		rules_cnt++;
	}

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);
	nfc->rule_cnt = rules_cnt;

	return 0;
}

int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		/* Subtract the default, unusable rule */
		nfc->rule_cnt = bitmap_weight(priv->cfp.unique,
					      priv->num_cfp_rules) - 1;
		/* We support specifying rule locations */
		nfc->data |= RX_CLS_LOC_SPECIAL;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sf2_cfp_rule_get(priv, port, nfc);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	return ret;
}

int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	return ret;
}

int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg |= TCAM_RESET;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & TCAM_RESET))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}