/*
 * B53 switch driver main logic
 *
 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
 * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_data/b53.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <net/dsa.h>

#include "b53_regs.h"
#include "b53_priv.h"

struct b53_mib_desc {
	u8 size;
	u8 offset;
	const char *name;
};

/* BCM5365 MIB counters */
static const struct b53_mib_desc b53_mibs_65[] = {
	{ 8, 0x00, "TxOctets" },
	{ 4, 0x08, "TxDropPkts" },
	{ 4, 0x10, "TxBroadcastPkts" },
	{ 4, 0x14, "TxMulticastPkts" },
	{ 4, 0x18, "TxUnicastPkts" },
	{ 4, 0x1c, "TxCollisions" },
	{ 4, 0x20, "TxSingleCollision" },
	{ 4, 0x24, "TxMultipleCollision" },
	{ 4, 0x28, "TxDeferredTransmit" },
	{ 4, 0x2c, "TxLateCollision" },
	{ 4, 0x30, "TxExcessiveCollision" },
	{ 4, 0x38, "TxPausePkts" },
	{ 8, 0x44, "RxOctets" },
	{ 4, 0x4c, "RxUndersizePkts" },
	{ 4, 0x50, "RxPausePkts" },
	{ 4, 0x54, "Pkts64Octets" },
	{ 4, 0x58, "Pkts65to127Octets" },
	{ 4, 0x5c, "Pkts128to255Octets" },
	{ 4, 0x60, "Pkts256to511Octets" },
	{ 4, 0x64, "Pkts512to1023Octets" },
	{ 4, 0x68, "Pkts1024to1522Octets" },
	{ 4, 0x6c, "RxOversizePkts" },
	{ 4, 0x70, "RxJabbers" },
	{ 4, 0x74, "RxAlignmentErrors" },
	{ 4, 0x78, "RxFCSErrors" },
	{ 8, 0x7c, "RxGoodOctets" },
	{ 4, 0x84, "RxDropPkts" },
	{ 4, 0x88, "RxUnicastPkts" },
	{ 4, 0x8c, "RxMulticastPkts" },
	{ 4, 0x90, "RxBroadcastPkts" },
	{ 4, 0x94, "RxSAChanges" },
	{ 4, 0x98, "RxFragments" },
};

#define B53_MIBS_65_SIZE	ARRAY_SIZE(b53_mibs_65)

/* BCM63xx MIB counters */
static const struct b53_mib_desc b53_mibs_63xx[] = {
	{ 8, 0x00, "TxOctets" },
	{ 4, 0x08, "TxDropPkts" },
	{ 4, 0x0c, "TxQoSPkts" },
	{ 4, 0x10, "TxBroadcastPkts" },
	{ 4, 0x14, "TxMulticastPkts" },
	{ 4, 0x18, "TxUnicastPkts" },
	{ 4, 0x1c, "TxCollisions" },
	{ 4, 0x20, "TxSingleCollision" },
	{ 4, 0x24, "TxMultipleCollision" },
	{ 4, 0x28, "TxDeferredTransmit" },
	{ 4, 0x2c, "TxLateCollision" },
	{ 4, 0x30, "TxExcessiveCollision" },
	{ 4, 0x38, "TxPausePkts" },
	{ 8, 0x3c, "TxQoSOctets" },
	{ 8, 0x44, "RxOctets" },
	{ 4, 0x4c, "RxUndersizePkts" },
	{ 4, 0x50, "RxPausePkts" },
	{ 4, 0x54, "Pkts64Octets" },
	{ 4, 0x58, "Pkts65to127Octets" },
	{ 4, 0x5c, "Pkts128to255Octets" },
	{ 4, 0x60, "Pkts256to511Octets" },
	{ 4, 0x64, "Pkts512to1023Octets" },
	{ 4, 0x68, "Pkts1024to1522Octets" },
	{ 4, 0x6c, "RxOversizePkts" },
	{ 4, 0x70, "RxJabbers" },
	{ 4, 0x74, "RxAlignmentErrors" },
	{ 4, 0x78, "RxFCSErrors" },
	{ 8, 0x7c, "RxGoodOctets" },
	{ 4, 0x84, "RxDropPkts" },
	{ 4, 0x88, "RxUnicastPkts" },
	{ 4, 0x8c, "RxMulticastPkts" },
	{ 4, 0x90, "RxBroadcastPkts" },
	{ 4, 0x94, "RxSAChanges" },
	{ 4, 0x98, "RxFragments" },
	{ 4, 0xa0, "RxSymbolErrors" },
	{ 4, 0xa4, "RxQoSPkts" },
	{ 8, 0xa8, "RxQoSOctets" },
	{ 4, 0xb0, "Pkts1523to2047Octets" },
	{ 4, 0xb4, "Pkts2048to4095Octets" },
	{ 4, 0xb8, "Pkts4096to8191Octets" },
	{ 4, 0xbc, "Pkts8192to9728Octets" },
	{ 4, 0xc0, "RxDiscarded" },
};

#define B53_MIBS_63XX_SIZE	ARRAY_SIZE(b53_mibs_63xx)

/* MIB counters */
static const struct b53_mib_desc b53_mibs[] = {
	{ 8, 0x00, "TxOctets" },
	{ 4, 0x08, "TxDropPkts" },
	{ 4, 0x10, "TxBroadcastPkts" },
	{ 4, 0x14, "TxMulticastPkts" },
	{ 4, 0x18, "TxUnicastPkts" },
	{ 4, 0x1c, "TxCollisions" },
	{ 4, 0x20, "TxSingleCollision" },
	{ 4, 0x24, "TxMultipleCollision" },
	{ 4, 0x28, "TxDeferredTransmit" },
	{ 4, 0x2c, "TxLateCollision" },
	{ 4, 0x30, "TxExcessiveCollision" },
	{ 4, 0x38, "TxPausePkts" },
	{ 8, 0x50, "RxOctets" },
	{ 4, 0x58, "RxUndersizePkts" },
	{ 4, 0x5c, "RxPausePkts" },
	{ 4, 0x60, "Pkts64Octets" },
	{ 4, 0x64, "Pkts65to127Octets" },
	{ 4, 0x68, "Pkts128to255Octets" },
	{ 4, 0x6c, "Pkts256to511Octets" },
	{ 4, 0x70, "Pkts512to1023Octets" },
	{ 4, 0x74, "Pkts1024to1522Octets" },
	{ 4, 0x78, "RxOversizePkts" },
	{ 4, 0x7c, "RxJabbers" },
	{ 4, 0x80, "RxAlignmentErrors" },
	{ 4, 0x84, "RxFCSErrors" },
	{ 8, 0x88, "RxGoodOctets" },
	{ 4, 0x90, "RxDropPkts" },
	{ 4, 0x94, "RxUnicastPkts" },
	{ 4, 0x98, "RxMulticastPkts" },
	{ 4, 0x9c, "RxBroadcastPkts" },
	{ 4, 0xa0, "RxSAChanges" },
	{ 4, 0xa4, "RxFragments" },
	{ 4, 0xa8, "RxJumboPkts" },
	{ 4, 0xac, "RxSymbolErrors" },
	{ 4, 0xc0, "RxDiscarded" },
};

#define B53_MIBS_SIZE	ARRAY_SIZE(b53_mibs)

static const struct b53_mib_desc b53_mibs_58xx[] = {
	{ 8, 0x00, "TxOctets" },
	{ 4, 0x08, "TxDropPkts" },
	{ 4, 0x0c, "TxQPKTQ0" },
	{ 4, 0x10, "TxBroadcastPkts" },
	{ 4, 0x14, "TxMulticastPkts" },
	{ 4, 0x18, "TxUnicastPKts" },
	{ 4, 0x1c, "TxCollisions" },
	{ 4, 0x20, "TxSingleCollision" },
	{ 4, 0x24, "TxMultipleCollision" },
	{ 4, 0x28, "TxDeferredCollision" },
	{ 4, 0x2c, "TxLateCollision" },
	{ 4, 0x30, "TxExcessiveCollision" },
	{ 4, 0x34, "TxFrameInDisc" },
	{ 4, 0x38, "TxPausePkts" },
	{ 4, 0x3c, "TxQPKTQ1" },
	{ 4, 0x40, "TxQPKTQ2" },
	{ 4, 0x44, "TxQPKTQ3" },
	{ 4, 0x48, "TxQPKTQ4" },
	{ 4, 0x4c, "TxQPKTQ5" },
	{ 8, 0x50, "RxOctets" },
	{ 4, 0x58, "RxUndersizePkts" },
	{ 4, 0x5c, "RxPausePkts" },
	{ 4, 0x60, "RxPkts64Octets" },
	{ 4, 0x64, "RxPkts65to127Octets" },
	{ 4, 0x68, "RxPkts128to255Octets" },
	{ 4, 0x6c, "RxPkts256to511Octets" },
	{ 4, 0x70, "RxPkts512to1023Octets" },
	{ 4, 0x74, "RxPkts1024toMaxPktsOctets" },
	{ 4, 0x78, "RxOversizePkts" },
	{ 4, 0x7c, "RxJabbers" },
	{ 4, 0x80, "RxAlignmentErrors" },
	{ 4, 0x84, "RxFCSErrors" },
	{ 8, 0x88, "RxGoodOctets" },
	{ 4, 0x90, "RxDropPkts" },
	{ 4, 0x94, "RxUnicastPkts" },
	{ 4, 0x98, "RxMulticastPkts" },
	{ 4, 0x9c, "RxBroadcastPkts" },
	{ 4, 0xa0, "RxSAChanges" },
	{ 4, 0xa4, "RxFragments" },
	{ 4, 0xa8, "RxJumboPkt" },
	{ 4, 0xac, "RxSymblErr" },
	{ 4, 0xb0, "InRangeErrCount" },
	{ 4, 0xb4, "OutRangeErrCount" },
	{ 4, 0xb8, "EEELpiEvent" },
	{ 4, 0xbc, "EEELpiDuration" },
	{ 4, 0xc0, "RxDiscard" },
	{ 4, 0xc8, "TxQPKTQ6" },
	{ 4, 0xcc, "TxQPKTQ7" },
	{ 4, 0xd0, "TxPkts64Octets" },
	{ 4, 0xd4, "TxPkts65to127Octets" },
	{ 4, 0xd8, "TxPkts128to255Octets" },
	{ 4, 0xdc, "TxPkts256to511Ocets" },
	{ 4, 0xe0, "TxPkts512to1023Ocets" },
	{ 4, 0xe4, "TxPkts1024toMaxPktOcets" },
};

#define B53_MIBS_58XX_SIZE	ARRAY_SIZE(b53_mibs_58xx)

static int b53_do_vlan_op(struct b53_device *dev, u8 op)
{
	unsigned int i;

	b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op);

	for (i = 0; i < 10; i++) {
		u8 vta;

		b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta);
		if (!(vta & VTA_START_CMD))
			return 0;

		usleep_range(100, 200);
	}

	return -EIO;
}

static void b53_set_vlan_entry(struct b53_device *dev, u16 vid,
			       struct b53_vlan *vlan)
{
	if (is5325(dev)) {
		u32 entry = 0;

		if (vlan->members) {
			entry = ((vlan->untag & VA_UNTAG_MASK_25) <<
				 VA_UNTAG_S_25) | vlan->members;
			if (dev->core_rev >= 3)
				entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S;
			else
				entry |= VA_VALID_25;
		}

		b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry);
		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
			    VTA_RW_STATE_WR | VTA_RW_OP_EN);
	} else if (is5365(dev)) {
		u16 entry = 0;

		if (vlan->members)
			entry = ((vlan->untag & VA_UNTAG_MASK_65) <<
				 VA_UNTAG_S_65) | vlan->members | VA_VALID_65;

		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry);
		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
			    VTA_RW_STATE_WR | VTA_RW_OP_EN);
	} else {
		b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
		b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2],
			    (vlan->untag << VTE_UNTAG_S) | vlan->members);

		b53_do_vlan_op(dev, VTA_CMD_WRITE);
	}

	dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n",
		vid, vlan->members, vlan->untag);
}

static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
			       struct b53_vlan *vlan)
{
	if (is5325(dev)) {
		u32 entry = 0;

		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
			    VTA_RW_STATE_RD | VTA_RW_OP_EN);
		b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry);

		if (dev->core_rev >= 3)
			vlan->valid = !!(entry & VA_VALID_25_R4);
		else
			vlan->valid = !!(entry & VA_VALID_25);
		vlan->members = entry & VA_MEMBER_MASK;
		vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25;

	} else if (is5365(dev)) {
		u16 entry = 0;

		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
			    VTA_RW_STATE_WR | VTA_RW_OP_EN);
		b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry);

		vlan->valid = !!(entry & VA_VALID_65);
		vlan->members = entry & VA_MEMBER_MASK;
		vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65;
	} else {
		u32 entry = 0;

		b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
		b53_do_vlan_op(dev, VTA_CMD_READ);
		b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry);
		vlan->members = entry & VTE_MEMBERS;
		vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS;
		vlan->valid = true;
	}
}

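/* Enable or disable switch-wide forwarding. The IMP port is included in dumb
 * forwarding mode, and unknown unicast/multicast/IP multicast forwarding is
 * left enabled so flooding decisions are made per-port.
 */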
static void b53_set_forwarding(struct b53_device *dev, int enable)
{
	u8 mgmt;

	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);

	if (enable)
		mgmt |= SM_SW_FWD_EN;
	else
		mgmt &= ~SM_SW_FWD_EN;

	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);

	/* Include IMP port in dumb forwarding mode */
	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
	mgmt |= B53_MII_DUMB_FWDG_EN;
	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);

	/* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
	 * frames should be flooded or not.
	 */
	b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
	mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
	b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
}

static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
			    bool enable_filtering)
{
	u8 mgmt, vc0, vc1, vc4 = 0, vc5;

	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
	b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0);
	b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1);

	if (is5325(dev) || is5365(dev)) {
		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5);
	} else if (is63xx(dev)) {
		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4);
		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5);
	} else {
		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4);
		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
	}

	if (enable) {
		vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
		vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
		vc4 &= ~VC4_ING_VID_CHECK_MASK;
		if (enable_filtering) {
			vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
			vc5 |= VC5_DROP_VTABLE_MISS;
		} else {
			vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
			vc5 &= ~VC5_DROP_VTABLE_MISS;
		}

		if (is5325(dev))
			vc0 &= ~VC0_RESERVED_1;

		if (is5325(dev) || is5365(dev))
			vc1 |= VC1_RX_MCST_TAG_EN;

	} else {
		vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
		vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
		vc4 &= ~VC4_ING_VID_CHECK_MASK;
		vc5 &= ~VC5_DROP_VTABLE_MISS;

		if (is5325(dev) || is5365(dev))
			vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
		else
			vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S;

		if (is5325(dev) || is5365(dev))
			vc1 &= ~VC1_RX_MCST_TAG_EN;
	}

	if (!is5325(dev) && !is5365(dev))
		vc5 &= ~VC5_VID_FFF_EN;

	b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0);
	b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1);

	if (is5325(dev) || is5365(dev)) {
		/* enable the high 8 bit vid check on 5325 */
		if (is5325(dev) && enable)
			b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3,
				   VC3_HIGH_8BIT_EN);
		else
			b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);

		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4);
		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5);
	} else if (is63xx(dev)) {
		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0);
		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4);
		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5);
	} else {
		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4);
		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5);
	}

	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);

	dev->vlan_enabled = enable;

	dev_dbg(dev->dev, "Port %d VLAN enabled: %d, filtering: %d\n",
		port, enable, enable_filtering);
}

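/* Enable or disable jumbo frames. Not available on BCM5325/BCM5365; when
 * enabled, all enabled ports accept frames up to JMS_MAX_SIZE, optionally
 * including 10/100 ports.
 */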
static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
{
	u32 port_mask = 0;
	u16 max_size = JMS_MIN_SIZE;

	if (is5325(dev) || is5365(dev))
		return -EINVAL;

	if (enable) {
		port_mask = dev->enabled_ports;
		max_size = JMS_MAX_SIZE;
		if (allow_10_100)
			port_mask |= JPM_10_100_JUMBO_EN;
	}

	b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask);
	return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size);
}

static int b53_flush_arl(struct b53_device *dev, u8 mask)
{
	unsigned int i;

	b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
		   FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);

	for (i = 0; i < 10; i++) {
		u8 fast_age_ctrl;

		b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
			  &fast_age_ctrl);

		if (!(fast_age_ctrl & FAST_AGE_DONE))
			goto out;

		msleep(1);
	}

	return -ETIMEDOUT;
out:
	/* Only age dynamic entries (default behavior) */
	b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC);
	return 0;
}

static int b53_fast_age_port(struct b53_device *dev, int port)
{
	b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);

	return b53_flush_arl(dev, FAST_AGE_PORT);
}

static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
{
	b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);

	return b53_flush_arl(dev, FAST_AGE_VLAN);
}

void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
{
	struct b53_device *dev = ds->priv;
	unsigned int i;
	u16 pvlan;

	/* Enable the IMP port to be in the same VLAN as the other ports
	 * on a per-port basis such that we only have Port i and IMP in
	 * the same VLAN.
	 */
	b53_for_each_port(dev, i) {
		b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan);
		pvlan |= BIT(cpu_port);
		b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan);
	}
}
EXPORT_SYMBOL(b53_imp_vlan_setup);

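/* Per-port helpers controlling unknown unicast flooding, (IP) multicast
 * flooding and address learning via the corresponding port mask registers.
 */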
static void b53_port_set_ucast_flood(struct b53_device *dev, int port,
				     bool unicast)
{
	u16 uc;

	b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
	if (unicast)
		uc |= BIT(port);
	else
		uc &= ~BIT(port);
	b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
}

static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
				     bool multicast)
{
	u16 mc;

	b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
	if (multicast)
		mc |= BIT(port);
	else
		mc &= ~BIT(port);
	b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);

	b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
	if (multicast)
		mc |= BIT(port);
	else
		mc &= ~BIT(port);
	b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
}

static void b53_port_set_learning(struct b53_device *dev, int port,
				  bool learning)
{
	u16 reg;

	b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg);
	if (learning)
		reg &= ~BIT(port);
	else
		reg |= BIT(port);
	b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
}

int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
	struct b53_device *dev = ds->priv;
	unsigned int cpu_port;
	int ret = 0;
	u16 pvlan;

	if (!dsa_is_user_port(ds, port))
		return 0;

	cpu_port = dsa_to_port(ds, port)->cpu_dp->index;

	b53_port_set_ucast_flood(dev, port, true);
	b53_port_set_mcast_flood(dev, port, true);
	b53_port_set_learning(dev, port, false);

	if (dev->ops->irq_enable)
		ret = dev->ops->irq_enable(dev, port);
	if (ret)
		return ret;

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);

	/* Set this port, and only this one to be in the default VLAN,
	 * if member of a bridge, restore its membership prior to
	 * bringing down this port.
	 */
	b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
	pvlan &= ~0x1ff;
	pvlan |= BIT(port);
	pvlan |= dev->ports[port].vlan_ctl_mask;
	b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);

	b53_imp_vlan_setup(ds, cpu_port);

	/* If EEE was enabled, restore it */
	if (dev->ports[port].eee.eee_enabled)
		b53_eee_enable_set(ds, port, true);

	return 0;
}
EXPORT_SYMBOL(b53_enable_port);

void b53_disable_port(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;
	u8 reg;

	/* Disable Tx/Rx for the port */
	b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
	reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);

	if (dev->ops->irq_disable)
		dev->ops->irq_disable(dev, port);
}
EXPORT_SYMBOL(b53_disable_port);

void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;
	bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE);
	u8 hdr_ctl, val;
	u16 reg;

	/* Resolve which bit controls the Broadcom tag */
	switch (port) {
	case 8:
		val = BRCM_HDR_P8_EN;
		break;
	case 7:
		val = BRCM_HDR_P7_EN;
		break;
	case 5:
		val = BRCM_HDR_P5_EN;
		break;
	default:
		val = 0;
		break;
	}

	/* Enable management mode if tagging is requested */
	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl);
	if (tag_en)
		hdr_ctl |= SM_SW_FWD_MODE;
	else
		hdr_ctl &= ~SM_SW_FWD_MODE;
	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl);

	/* Configure the appropriate IMP port */
	b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl);
	if (port == 8)
		hdr_ctl |= GC_FRM_MGMT_PORT_MII;
	else if (port == 5)
		hdr_ctl |= GC_FRM_MGMT_PORT_M;
	b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);

	/* Enable Broadcom tags for IMP port */
	b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
	if (tag_en)
		hdr_ctl |= val;
	else
		hdr_ctl &= ~val;
	b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl);

	/* Registers below are only accessible on newer devices */
	if (!is58xx(dev))
		return;

	/* Enable reception of Broadcom tags for CPU TX (switch RX) to
	 * allow us to tag outgoing frames
	 */
	b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, &reg);
	if (tag_en)
		reg &= ~BIT(port);
	else
		reg |= BIT(port);
	b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg);

	/* Enable transmission of Broadcom tags from the switch (CPU RX) to
	 * allow delivering frames to the per-port net_devices
	 */
	b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, &reg);
	if (tag_en)
		reg &= ~BIT(port);
	else
		reg |= BIT(port);
	b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg);
}
EXPORT_SYMBOL(b53_brcm_hdr_setup);

static void b53_enable_cpu_port(struct b53_device *dev, int port)
{
	u8 port_ctrl;

	/* BCM5325 CPU port is at 8 */
	if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25)
		port = B53_CPU_PORT;

	port_ctrl = PORT_CTRL_RX_BCST_EN |
		    PORT_CTRL_RX_MCST_EN |
		    PORT_CTRL_RX_UCST_EN;
	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);

	b53_brcm_hdr_setup(dev->ds, port);

	b53_port_set_ucast_flood(dev, port, true);
	b53_port_set_mcast_flood(dev, port, true);
	b53_port_set_learning(dev, port, false);
}

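/* Enable the MIB counters by clearing GC_RESET_MIB and GC_MIB_AC_EN in the
 * global configuration register so the counters accumulate until explicitly
 * reset.
 */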
static void b53_enable_mib(struct b53_device *dev)
{
	u8 gc;

	b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
	gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN);
	b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
}

static u16 b53_default_pvid(struct b53_device *dev)
{
	if (is5325(dev) || is5365(dev))
		return 1;
	else
		return 0;
}

static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;

	return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port);
}

int b53_configure_vlan(struct dsa_switch *ds)
{
	struct b53_device *dev = ds->priv;
	struct b53_vlan vl = { 0 };
	struct b53_vlan *v;
	int i, def_vid;
	u16 vid;

	def_vid = b53_default_pvid(dev);

	/* clear all vlan entries */
	if (is5325(dev) || is5365(dev)) {
		for (i = def_vid; i < dev->num_vlans; i++)
			b53_set_vlan_entry(dev, i, &vl);
	} else {
		b53_do_vlan_op(dev, VTA_CMD_CLEAR);
	}

	b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering);

	/* Create an untagged VLAN entry for the default PVID in case
	 * CONFIG_VLAN_8021Q is disabled and there are no calls to
	 * dsa_slave_vlan_rx_add_vid() to create the default VLAN
	 * entry. Do this only when the tagging protocol is not
	 * DSA_TAG_PROTO_NONE
	 */
	b53_for_each_port(dev, i) {
		v = &dev->vlans[def_vid];
		v->members |= BIT(i);
		if (!b53_vlan_port_needs_forced_tagged(ds, i))
			v->untag = v->members;
		b53_write16(dev, B53_VLAN_PAGE,
			    B53_VLAN_PORT_DEF_TAG(i), def_vid);
	}

	/* Upon initial call we have not set-up any VLANs, but upon
	 * system resume, we need to restore all VLAN entries.
	 */
	for (vid = def_vid; vid < dev->num_vlans; vid++) {
		v = &dev->vlans[vid];

		if (!v->members)
			continue;

		b53_set_vlan_entry(dev, vid, v);
		b53_fast_age_vlan(dev, vid);
	}

	return 0;
}
EXPORT_SYMBOL(b53_configure_vlan);

static void b53_switch_reset_gpio(struct b53_device *dev)
{
	int gpio = dev->reset_gpio;

	if (gpio < 0)
		return;

	/* Reset sequence: drive RESET low (50 ms), then high (20 ms) */
	gpio_set_value(gpio, 0);
	mdelay(50);

	gpio_set_value(gpio, 1);
	mdelay(20);

	dev->current_page = 0xff;
}

static int b53_switch_reset(struct b53_device *dev)
{
	unsigned int timeout = 1000;
	u8 mgmt, reg;

	b53_switch_reset_gpio(dev);

	if (is539x(dev)) {
		b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83);
		b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
	}

	/* This is specific to 58xx devices here, do not use is58xx() which
	 * covers the larger Starfighter 2 family, including 7445/7278 which
	 * still use this driver as a library and need to perform the reset
	 * earlier.
	 */
	if (dev->chip_id == BCM58XX_DEVICE_ID ||
	    dev->chip_id == BCM583XX_DEVICE_ID) {
		b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
		reg |= SW_RST | EN_SW_RST | EN_CH_RST;
		b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);

		do {
			b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
			if (!(reg & SW_RST))
				break;

			usleep_range(1000, 2000);
		} while (timeout-- > 0);

		if (timeout == 0) {
			dev_err(dev->dev,
				"Timeout waiting for SW_RST to clear!\n");
			return -ETIMEDOUT;
		}
	}

	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);

	if (!(mgmt & SM_SW_FWD_EN)) {
		mgmt &= ~SM_SW_FWD_MODE;
		mgmt |= SM_SW_FWD_EN;

		b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
		b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);

		if (!(mgmt & SM_SW_FWD_EN)) {
			dev_err(dev->dev, "Failed to enable switch!\n");
			return -EINVAL;
		}
	}

	b53_enable_mib(dev);

	return b53_flush_arl(dev, FAST_AGE_STATIC);
}

static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
	struct b53_device *priv = ds->priv;
	u16 value = 0;
	int ret;

	if (priv->ops->phy_read16)
		ret = priv->ops->phy_read16(priv, addr, reg, &value);
	else
		ret = b53_read16(priv, B53_PORT_MII_PAGE(addr),
				 reg * 2, &value);

	return ret ? ret : value;
}

static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
	struct b53_device *priv = ds->priv;

	if (priv->ops->phy_write16)
		return priv->ops->phy_write16(priv, addr, reg, val);

	return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val);
}

static int b53_reset_switch(struct b53_device *priv)
{
	/* reset vlans */
	memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
	memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);

	priv->serdes_lane = B53_INVALID_LANE;

	return b53_switch_reset(priv);
}

static int b53_apply_config(struct b53_device *priv)
{
	/* disable switching */
	b53_set_forwarding(priv, 0);

	b53_configure_vlan(priv->ds);

	/* enable switching */
	b53_set_forwarding(priv, 1);

	return 0;
}

static void b53_reset_mib(struct b53_device *priv)
{
	u8 gc;

	b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);

	b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB);
	msleep(1);
	b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB);
	msleep(1);
}

static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev)
{
	if (is5365(dev))
		return b53_mibs_65;
	else if (is63xx(dev))
		return b53_mibs_63xx;
	else if (is58xx(dev))
		return b53_mibs_58xx;
	else
		return b53_mibs;
}

static unsigned int b53_get_mib_size(struct b53_device *dev)
{
	if (is5365(dev))
		return B53_MIBS_65_SIZE;
	else if (is63xx(dev))
		return B53_MIBS_63XX_SIZE;
	else if (is58xx(dev))
		return B53_MIBS_58XX_SIZE;
	else
		return B53_MIBS_SIZE;
}

static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port)
{
	/* These ports typically do not have built-in PHYs */
	switch (port) {
	case B53_CPU_PORT_25:
	case 7:
	case B53_CPU_PORT:
		return NULL;
	}

	return mdiobus_get_phy(ds->slave_mii_bus, port);
}

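/* ethtool -S support: return either the switch MIB counter names or the PHY
 * statistics strings for this port.
 */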
void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
		     uint8_t *data)
{
	struct b53_device *dev = ds->priv;
	const struct b53_mib_desc *mibs = b53_get_mib(dev);
	unsigned int mib_size = b53_get_mib_size(dev);
	struct phy_device *phydev;
	unsigned int i;

	if (stringset == ETH_SS_STATS) {
		for (i = 0; i < mib_size; i++)
			strlcpy(data + i * ETH_GSTRING_LEN,
				mibs[i].name, ETH_GSTRING_LEN);
	} else if (stringset == ETH_SS_PHY_STATS) {
		phydev = b53_get_phy_device(ds, port);
		if (!phydev)
			return;

		phy_ethtool_get_strings(phydev, data);
	}
}
EXPORT_SYMBOL(b53_get_strings);

void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
	struct b53_device *dev = ds->priv;
	const struct b53_mib_desc *mibs = b53_get_mib(dev);
	unsigned int mib_size = b53_get_mib_size(dev);
	const struct b53_mib_desc *s;
	unsigned int i;
	u64 val = 0;

	if (is5365(dev) && port == 5)
		port = 8;

	mutex_lock(&dev->stats_mutex);

	for (i = 0; i < mib_size; i++) {
		s = &mibs[i];

		if (s->size == 8) {
			b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val);
		} else {
			u32 val32;

			b53_read32(dev, B53_MIB_PAGE(port), s->offset,
				   &val32);
			val = val32;
		}
		data[i] = (u64)val;
	}

	mutex_unlock(&dev->stats_mutex);
}
EXPORT_SYMBOL(b53_get_ethtool_stats);

void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
	struct phy_device *phydev;

	phydev = b53_get_phy_device(ds, port);
	if (!phydev)
		return;

	phy_ethtool_get_stats(phydev, NULL, data);
}
EXPORT_SYMBOL(b53_get_ethtool_phy_stats);

int b53_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct b53_device *dev = ds->priv;
	struct phy_device *phydev;

	if (sset == ETH_SS_STATS) {
		return b53_get_mib_size(dev);
	} else if (sset == ETH_SS_PHY_STATS) {
		phydev = b53_get_phy_device(ds, port);
		if (!phydev)
			return 0;

		return phy_ethtool_get_sset_count(phydev);
	}

	return 0;
}
EXPORT_SYMBOL(b53_get_sset_count);

enum b53_devlink_resource_id {
	B53_DEVLINK_PARAM_ID_VLAN_TABLE,
};

static u64 b53_devlink_vlan_table_get(void *priv)
{
	struct b53_device *dev = priv;
	struct b53_vlan *vl;
	unsigned int i;
	u64 count = 0;

	for (i = 0; i < dev->num_vlans; i++) {
		vl = &dev->vlans[i];
		if (vl->members)
			count++;
	}

	return count;
}

int b53_setup_devlink_resources(struct dsa_switch *ds)
{
	struct devlink_resource_size_params size_params;
	struct b53_device *dev = ds->priv;
	int err;

	devlink_resource_size_params_init(&size_params, dev->num_vlans,
					  dev->num_vlans,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	err = dsa_devlink_resource_register(ds, "VLAN", dev->num_vlans,
					    B53_DEVLINK_PARAM_ID_VLAN_TABLE,
					    DEVLINK_RESOURCE_ID_PARENT_TOP,
					    &size_params);
	if (err)
		goto out;

	dsa_devlink_resource_occ_get_register(ds,
					      B53_DEVLINK_PARAM_ID_VLAN_TABLE,
					      b53_devlink_vlan_table_get, dev);

	return 0;
out:
	dsa_devlink_resources_unregister(ds);
	return err;
}
EXPORT_SYMBOL(b53_setup_devlink_resources);

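/* Main DSA setup entry point: reset the switch, clear the MIB counters, apply
 * the VLAN/forwarding configuration, bring up the CPU (IMP) port and disable
 * all other ports until .port_enable runs, then register the devlink VLAN
 * table resource.
 */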
static int b53_setup(struct dsa_switch *ds)
{
	struct b53_device *dev = ds->priv;
	unsigned int port;
	int ret;

	/* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set
	 * which forces the CPU port to be tagged in all VLANs.
	 */
	ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE;

	ret = b53_reset_switch(dev);
	if (ret) {
		dev_err(ds->dev, "failed to reset switch\n");
		return ret;
	}

	b53_reset_mib(dev);

	ret = b53_apply_config(dev);
	if (ret) {
		dev_err(ds->dev, "failed to apply configuration\n");
		return ret;
	}

	/* Configure IMP/CPU port, disable all other ports. Enabled
	 * ports will be configured with .port_enable
	 */
	for (port = 0; port < dev->num_ports; port++) {
		if (dsa_is_cpu_port(ds, port))
			b53_enable_cpu_port(dev, port);
		else
			b53_disable_port(ds, port);
	}

	return b53_setup_devlink_resources(ds);
}

static void b53_teardown(struct dsa_switch *ds)
{
	dsa_devlink_resources_unregister(ds);
}

static void b53_force_link(struct b53_device *dev, int port, int link)
{
	u8 reg, val, off;

	/* Override the port settings */
	if (port == dev->cpu_port) {
		off = B53_PORT_OVERRIDE_CTRL;
		val = PORT_OVERRIDE_EN;
	} else {
		off = B53_GMII_PORT_OVERRIDE_CTRL(port);
		val = GMII_PO_EN;
	}

	b53_read8(dev, B53_CTRL_PAGE, off, &reg);
	reg |= val;
	if (link)
		reg |= PORT_OVERRIDE_LINK;
	else
		reg &= ~PORT_OVERRIDE_LINK;
	b53_write8(dev, B53_CTRL_PAGE, off, reg);
}

static void b53_force_port_config(struct b53_device *dev, int port,
				  int speed, int duplex,
				  bool tx_pause, bool rx_pause)
{
	u8 reg, val, off;

	/* Override the port settings */
	if (port == dev->cpu_port) {
		off = B53_PORT_OVERRIDE_CTRL;
		val = PORT_OVERRIDE_EN;
	} else {
		off = B53_GMII_PORT_OVERRIDE_CTRL(port);
		val = GMII_PO_EN;
	}

	b53_read8(dev, B53_CTRL_PAGE, off, &reg);
	reg |= val;
	if (duplex == DUPLEX_FULL)
		reg |= PORT_OVERRIDE_FULL_DUPLEX;
	else
		reg &= ~PORT_OVERRIDE_FULL_DUPLEX;

	switch (speed) {
	case 2000:
		reg |= PORT_OVERRIDE_SPEED_2000M;
		fallthrough;
	case SPEED_1000:
		reg |= PORT_OVERRIDE_SPEED_1000M;
		break;
	case SPEED_100:
		reg |= PORT_OVERRIDE_SPEED_100M;
		break;
	case SPEED_10:
		reg |= PORT_OVERRIDE_SPEED_10M;
		break;
	default:
		dev_err(dev->dev, "unknown speed: %d\n", speed);
		return;
	}

	if (rx_pause)
		reg |= PORT_OVERRIDE_RX_FLOW;
	if (tx_pause)
		reg |= PORT_OVERRIDE_TX_FLOW;

	b53_write8(dev, B53_CTRL_PAGE, off, reg);
}

static void b53_adjust_link(struct dsa_switch *ds, int port,
			    struct phy_device *phydev)
{
	struct b53_device *dev = ds->priv;
	struct ethtool_eee *p = &dev->ports[port].eee;
	u8 rgmii_ctrl = 0, reg = 0, off;
	bool tx_pause = false;
	bool rx_pause = false;

	if (!phy_is_pseudo_fixed_link(phydev))
		return;

	/* Enable flow control on BCM5301x's CPU port */
	if (is5301x(dev) && port == dev->cpu_port)
		tx_pause = rx_pause = true;

	if (phydev->pause) {
		if (phydev->asym_pause)
			tx_pause = true;
		rx_pause = true;
	}

	b53_force_port_config(dev, port, phydev->speed, phydev->duplex,
			      tx_pause, rx_pause);
	b53_force_link(dev, port, phydev->link);

	if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
		if (port == 8)
			off = B53_RGMII_CTRL_IMP;
		else
			off = B53_RGMII_CTRL_P(port);

		/* Configure the port RGMII clock delay by DLL disabled and
		 * tx_clk aligned timing (restoring to reset defaults)
		 */
		b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
		rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
				RGMII_CTRL_TIMING_SEL);

		/* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
		 * sure that we enable the port TX clock internal delay to
		 * account for this internal delay that is inserted, otherwise
		 * the switch won't be able to receive correctly.
		 *
		 * PHY_INTERFACE_MODE_RGMII means that we are not introducing
		 * any delay on either transmission or reception, so the
		 * BCM53125 must also be configured accordingly to account for
		 * the lack of delay.
		 *
		 * The BCM53125 switch has its RX clock and TX clock control
		 * swapped, hence the reason why we modify the TX clock path in
		 * the "RGMII" case
		 */
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
			rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
			rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
		rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
		b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);

		dev_info(ds->dev, "Configured port %d for %s\n", port,
			 phy_modes(phydev->interface));
	}

	/* configure MII port if necessary */
	if (is5325(dev)) {
		b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
			  &reg);

		/* reverse mii needs to be enabled */
		if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
			b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
				   reg | PORT_OVERRIDE_RV_MII_25);
			b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
				  &reg);

			if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
				dev_err(ds->dev,
					"Failed to enable reverse MII mode\n");
				return;
			}
		}
	} else if (is5301x(dev)) {
		if (port != dev->cpu_port) {
			b53_force_port_config(dev, dev->cpu_port, 2000,
					      DUPLEX_FULL, true, true);
			b53_force_link(dev, dev->cpu_port, 1);
		}
	}

	/* Re-negotiate EEE if it was enabled already */
	p->eee_enabled = b53_eee_init(ds, port, phydev);
}

void b53_port_event(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;
	bool link;
	u16 sts;

	b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts);
	link = !!(sts & BIT(port));
	dsa_port_phylink_mac_change(ds, port, link);
}
EXPORT_SYMBOL(b53_port_event);

void b53_phylink_validate(struct dsa_switch *ds, int port,
			  unsigned long *supported,
			  struct phylink_link_state *state)
{
	struct b53_device *dev = ds->priv;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (dev->ops->serdes_phylink_validate)
		dev->ops->serdes_phylink_validate(dev, port, mask, state);

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	/* With the exclusion of 5325/5365, MII, Reverse MII and 802.3z, we
	 * support Gigabit, including Half duplex.
	 */
	if (state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII &&
	    !phy_interface_mode_is_8023z(state->interface) &&
	    !(is5325(dev) || is5365(dev))) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseT_Half);
	}

	if (!phy_interface_mode_is_8023z(state->interface)) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
	}

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);

	phylink_helper_basex_speed(state);
}
EXPORT_SYMBOL(b53_phylink_validate);

int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
			       struct phylink_link_state *state)
{
	struct b53_device *dev = ds->priv;
	int ret = -EOPNOTSUPP;

	if ((phy_interface_mode_is_8023z(state->interface) ||
	     state->interface == PHY_INTERFACE_MODE_SGMII) &&
	    dev->ops->serdes_link_state)
		ret = dev->ops->serdes_link_state(dev, port, state);

	return ret;
}
EXPORT_SYMBOL(b53_phylink_mac_link_state);

void b53_phylink_mac_config(struct dsa_switch *ds, int port,
			    unsigned int mode,
			    const struct phylink_link_state *state)
{
	struct b53_device *dev = ds->priv;

	if (mode == MLO_AN_PHY || mode == MLO_AN_FIXED)
		return;

	if ((phy_interface_mode_is_8023z(state->interface) ||
	     state->interface == PHY_INTERFACE_MODE_SGMII) &&
	    dev->ops->serdes_config)
		dev->ops->serdes_config(dev, port, mode, state);
}
EXPORT_SYMBOL(b53_phylink_mac_config);

void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;

	if (dev->ops->serdes_an_restart)
		dev->ops->serdes_an_restart(dev, port);
}
EXPORT_SYMBOL(b53_phylink_mac_an_restart);

void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
			       unsigned int mode,
			       phy_interface_t interface)
{
	struct b53_device *dev = ds->priv;

	if (mode == MLO_AN_PHY)
		return;

	if (mode == MLO_AN_FIXED) {
		b53_force_link(dev, port, false);
		return;
	}

	if (phy_interface_mode_is_8023z(interface) &&
	    dev->ops->serdes_link_set)
		dev->ops->serdes_link_set(dev, port, mode, interface, false);
}
EXPORT_SYMBOL(b53_phylink_mac_link_down);

void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
			     unsigned int mode,
			     phy_interface_t interface,
			     struct phy_device *phydev,
			     int speed, int duplex,
			     bool tx_pause, bool rx_pause)
{
	struct b53_device *dev = ds->priv;

	if (mode == MLO_AN_PHY)
		return;

	if (mode == MLO_AN_FIXED) {
		b53_force_port_config(dev, port, speed, duplex,
				      tx_pause, rx_pause);
		b53_force_link(dev, port, true);
		return;
	}

	if (phy_interface_mode_is_8023z(interface) &&
	    dev->ops->serdes_link_set)
		dev->ops->serdes_link_set(dev, port, mode, interface, true);
}
EXPORT_SYMBOL(b53_phylink_mac_link_up);

int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
		       struct netlink_ext_ack *extack)
{
	struct b53_device *dev = ds->priv;

	b53_enable_vlan(dev, port, dev->vlan_enabled, vlan_filtering);

	return 0;
}
EXPORT_SYMBOL(b53_vlan_filtering);

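/* Validate a VLAN entry before programming it: VID 0 is reserved on
 * BCM5325/BCM5365, port 7 on BCM7278 cannot receive tagged frames, and the
 * VID must fit in the VLAN table.
 */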
static int b53_vlan_prepare(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_vlan *vlan)
{
	struct b53_device *dev = ds->priv;

	if ((is5325(dev) || is5365(dev)) && vlan->vid == 0)
		return -EOPNOTSUPP;

	/* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of
	 * receiving VLAN tagged frames at all; we can still allow the port to
	 * be configured for egress untagged.
	 */
	if (dev->chip_id == BCM7278_DEVICE_ID && port == 7 &&
	    !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return -EINVAL;

	if (vlan->vid >= dev->num_vlans)
		return -ERANGE;

	b53_enable_vlan(dev, port, true, ds->vlan_filtering);

	return 0;
}

int b53_vlan_add(struct dsa_switch *ds, int port,
		 const struct switchdev_obj_port_vlan *vlan,
		 struct netlink_ext_ack *extack)
{
	struct b53_device *dev = ds->priv;
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct b53_vlan *vl;
	int err;

	err = b53_vlan_prepare(ds, port, vlan);
	if (err)
		return err;

	vl = &dev->vlans[vlan->vid];

	b53_get_vlan_entry(dev, vlan->vid, vl);

	if (vlan->vid == 0 && vlan->vid == b53_default_pvid(dev))
		untagged = true;

	vl->members |= BIT(port);
	if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
		vl->untag |= BIT(port);
	else
		vl->untag &= ~BIT(port);

	b53_set_vlan_entry(dev, vlan->vid, vl);
	b53_fast_age_vlan(dev, vlan->vid);

	if (pvid && !dsa_is_cpu_port(ds, port)) {
		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
			    vlan->vid);
		b53_fast_age_vlan(dev, vlan->vid);
	}

	return 0;
}
EXPORT_SYMBOL(b53_vlan_add);

int b53_vlan_del(struct dsa_switch *ds, int port,
		 const struct switchdev_obj_port_vlan *vlan)
{
	struct b53_device *dev = ds->priv;
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	struct b53_vlan *vl;
	u16 pvid;

	b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);

	vl = &dev->vlans[vlan->vid];

	b53_get_vlan_entry(dev, vlan->vid, vl);

	vl->members &= ~BIT(port);

	if (pvid == vlan->vid)
		pvid = b53_default_pvid(dev);

	if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
		vl->untag &= ~(BIT(port));

	b53_set_vlan_entry(dev, vlan->vid, vl);
	b53_fast_age_vlan(dev, vlan->vid);

	b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
	b53_fast_age_vlan(dev, pvid);

	return 0;
}
EXPORT_SYMBOL(b53_vlan_del);

/* Address Resolution Logic routines */
static int b53_arl_op_wait(struct b53_device *dev)
{
	unsigned int timeout = 10;
	u8 reg;

	do {
		b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
		if (!(reg & ARLTBL_START_DONE))
			return 0;

		usleep_range(1000, 2000);
	} while (timeout--);

	dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg);

	return -ETIMEDOUT;
}

static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
{
	u8 reg;

	if (op > ARLTBL_RW)
		return -EINVAL;

	b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
	reg |= ARLTBL_START_DONE;
	if (op)
		reg |= ARLTBL_RW;
	else
		reg &= ~ARLTBL_RW;
	if (dev->vlan_enabled)
		reg &= ~ARLTBL_IVL_SVL_SELECT;
	else
		reg |= ARLTBL_IVL_SVL_SELECT;
	b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);

	return b53_arl_op_wait(dev);
}

static int b53_arl_read(struct b53_device *dev, u64 mac,
			u16 vid, struct b53_arl_entry *ent, u8 *idx)
{
	DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
	unsigned int i;
	int ret;

	ret = b53_arl_op_wait(dev);
	if (ret)
		return ret;

	bitmap_zero(free_bins, dev->num_arl_bins);

	/* Read the bins */
	for (i = 0; i < dev->num_arl_bins; i++) {
		u64 mac_vid;
		u32 fwd_entry;

		b53_read64(dev, B53_ARLIO_PAGE,
			   B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
		b53_read32(dev, B53_ARLIO_PAGE,
			   B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
		b53_arl_to_entry(ent, mac_vid, fwd_entry);

		if (!(fwd_entry & ARLTBL_VALID)) {
			set_bit(i, free_bins);
			continue;
		}
		if ((mac_vid & ARLTBL_MAC_MASK) != mac)
			continue;
		if (dev->vlan_enabled &&
		    ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid)
			continue;
		*idx = i;
		return 0;
	}

	if (bitmap_weight(free_bins, dev->num_arl_bins) == 0)
		return -ENOSPC;

	*idx = find_first_bit(free_bins, dev->num_arl_bins);

	return -ENOENT;
}

static int b53_arl_op(struct b53_device *dev, int op, int port,
		      const unsigned char *addr, u16 vid, bool is_valid)
{
	struct b53_arl_entry ent;
	u32 fwd_entry;
	u64 mac, mac_vid = 0;
	u8 idx = 0;
	int ret;

	/* Convert the array into a 64-bit MAC */
	mac = ether_addr_to_u64(addr);

	/* Perform a read for the given MAC and VID */
	b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
	b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);

	/* Issue a read operation for this MAC */
	ret = b53_arl_rw_op(dev, 1);
	if (ret)
		return ret;

	ret = b53_arl_read(dev, mac, vid, &ent, &idx);

	/* If this is a read, just finish now */
	if (op)
		return ret;

	switch (ret) {
	case -ETIMEDOUT:
		return ret;
	case -ENOSPC:
		dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
			addr, vid);
		return is_valid ? ret : 0;
	case -ENOENT:
		/* We could not find a matching MAC, so reset to a new entry */
		dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n",
			addr, vid, idx);
		fwd_entry = 0;
		break;
	default:
		dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n",
			addr, vid, idx);
		break;
	}

	/* For multicast address, the port is a bitmask and the validity
	 * is determined by having at least one port being still active
	 */
	if (!is_multicast_ether_addr(addr)) {
		ent.port = port;
		ent.is_valid = is_valid;
	} else {
		if (is_valid)
			ent.port |= BIT(port);
		else
			ent.port &= ~BIT(port);

		ent.is_valid = !!(ent.port);
	}

	ent.vid = vid;
	ent.is_static = true;
	ent.is_age = false;
	memcpy(ent.mac, addr, ETH_ALEN);
	b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);

	b53_write64(dev, B53_ARLIO_PAGE,
		    B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
	b53_write32(dev, B53_ARLIO_PAGE,
		    B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);

	return b53_arl_rw_op(dev, 0);
}

int b53_fdb_add(struct dsa_switch *ds, int port,
		const unsigned char *addr, u16 vid)
{
	struct b53_device *priv = ds->priv;

	/* 5325 and 5365 require some more massaging, but could
	 * be supported eventually
	 */
	if (is5325(priv) || is5365(priv))
		return -EOPNOTSUPP;

	return b53_arl_op(priv, 0, port, addr, vid, true);
}
EXPORT_SYMBOL(b53_fdb_add);

int b53_fdb_del(struct dsa_switch *ds, int port,
		const unsigned char *addr, u16 vid)
{
	struct b53_device *priv = ds->priv;

	return b53_arl_op(priv, 0, port, addr, vid, false);
}
EXPORT_SYMBOL(b53_fdb_del);

static int b53_arl_search_wait(struct b53_device *dev)
{
	unsigned int timeout = 1000;
	u8 reg;

	do {
		b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
		if (!(reg & ARL_SRCH_STDN))
			return 0;

		if (reg & ARL_SRCH_VLID)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout--);

	return -ETIMEDOUT;
}

static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
			      struct b53_arl_entry *ent)
{
	u64 mac_vid;
	u32 fwd_entry;

	b53_read64(dev, B53_ARLIO_PAGE,
		   B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
	b53_read32(dev, B53_ARLIO_PAGE,
		   B53_ARL_SRCH_RSTL(idx), &fwd_entry);
	b53_arl_to_entry(ent, mac_vid, fwd_entry);
}

static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
			dsa_fdb_dump_cb_t *cb, void *data)
{
	if (!ent->is_valid)
		return 0;

	if (port != ent->port)
		return 0;

	return cb(ent->mac, ent->vid, ent->is_static, data);
}

int b53_fdb_dump(struct dsa_switch *ds, int port,
		 dsa_fdb_dump_cb_t *cb, void *data)
{
	struct b53_device *priv = ds->priv;
	struct b53_arl_entry results[2];
	unsigned int count = 0;
	int ret;
	u8 reg;

	/* Start search operation */
	reg = ARL_SRCH_STDN;
	b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);

	do {
		ret = b53_arl_search_wait(priv);
		if (ret)
			return ret;

		b53_arl_search_rd(priv, 0, &results[0]);
		ret = b53_fdb_copy(port, &results[0], cb, data);
		if (ret)
			return ret;

		if (priv->num_arl_bins > 2) {
			b53_arl_search_rd(priv, 1, &results[1]);
			ret = b53_fdb_copy(port, &results[1], cb, data);
			if (ret)
				return ret;

			if (!results[0].is_valid && !results[1].is_valid)
				break;
		}

	} while (count++ < b53_max_arl_entries(priv) / 2);

	return 0;
}
EXPORT_SYMBOL(b53_fdb_dump);

int b53_mdb_add(struct dsa_switch *ds, int port,
		const struct switchdev_obj_port_mdb *mdb)
{
	struct b53_device *priv = ds->priv;

	/* 5325 and 5365 require some more massaging, but could
	 * be supported eventually
	 */
	if (is5325(priv) || is5365(priv))
		return -EOPNOTSUPP;

	return b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
}
EXPORT_SYMBOL(b53_mdb_add);

int b53_mdb_del(struct dsa_switch *ds, int port,
		const struct switchdev_obj_port_mdb *mdb)
{
	struct b53_device *priv = ds->priv;
	int ret;

	ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false);
	if (ret)
		dev_err(ds->dev, "failed to delete MDB entry\n");

	return ret;
}
EXPORT_SYMBOL(b53_mdb_del);

int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
{
	struct b53_device *dev = ds->priv;
	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
	u16 pvlan, reg;
	unsigned int i;

	/* On 7278, port 7 which connects to the ASP should only receive
	 * traffic from matching CFP rules.
	 */
	if (dev->chip_id == BCM7278_DEVICE_ID && port == 7)
		return -EINVAL;

	/* Make this port leave the all VLANs join since we will have proper
	 * VLAN entries from now on
	 */
	if (is58xx(dev)) {
		b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
		reg &= ~BIT(port);
		if ((reg & BIT(cpu_port)) == BIT(cpu_port))
			reg &= ~BIT(cpu_port);
		b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
	}

	b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);

	b53_for_each_port(dev, i) {
		if (dsa_to_port(ds, i)->bridge_dev != br)
			continue;

		/* Add this local port to the remote port VLAN control
		 * membership and update the remote port bitmask
		 */
		b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
		reg |= BIT(port);
		b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
		dev->ports[i].vlan_ctl_mask = reg;

		pvlan |= BIT(i);
	}

	/* Configure the local port VLAN control membership to include
	 * remote ports and update the local port bitmask
	 */
	b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
	dev->ports[port].vlan_ctl_mask = pvlan;

	return 0;
}
EXPORT_SYMBOL(b53_br_join);

void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
{
	struct b53_device *dev = ds->priv;
	struct b53_vlan *vl = &dev->vlans[0];
	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
	unsigned int i;
	u16 pvlan, reg, pvid;

	b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);

	b53_for_each_port(dev, i) {
		/* Don't touch the remaining ports */
		if (dsa_to_port(ds, i)->bridge_dev != br)
			continue;

		b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
		reg &= ~BIT(port);
		b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
		dev->ports[port].vlan_ctl_mask = reg;

		/* Prevent self removal to preserve isolation */
		if (port != i)
			pvlan &= ~BIT(i);
	}
	b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
	dev->ports[port].vlan_ctl_mask = pvlan;

	pvid = b53_default_pvid(dev);

	/* Make this port join all VLANs without VLAN entries */
	if (is58xx(dev)) {
		b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
		reg |= BIT(port);
		if (!(reg & BIT(cpu_port)))
			reg |= BIT(cpu_port);
		b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
	} else {
		b53_get_vlan_entry(dev, pvid, vl);
		vl->members |= BIT(port) | BIT(cpu_port);
		vl->untag |= BIT(port) | BIT(cpu_port);
		b53_set_vlan_entry(dev, pvid, vl);
	}
}
EXPORT_SYMBOL(b53_br_leave);

void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
{
	struct b53_device *dev = ds->priv;
	u8 hw_state;
	u8 reg;

	switch (state) {
	case BR_STATE_DISABLED:
		hw_state = PORT_CTRL_DIS_STATE;
		break;
	case BR_STATE_LISTENING:
		hw_state = PORT_CTRL_LISTEN_STATE;
		break;
	case BR_STATE_LEARNING:
		hw_state = PORT_CTRL_LEARN_STATE;
		break;
	case BR_STATE_FORWARDING:
		hw_state = PORT_CTRL_FWD_STATE;
		break;
	case BR_STATE_BLOCKING:
		hw_state = PORT_CTRL_BLOCK_STATE;
		break;
	default:
		dev_err(ds->dev, "invalid STP state: %d\n", state);
		return;
	}

	b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
	reg &= ~PORT_CTRL_STP_STATE_MASK;
	reg |= hw_state;
	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
}
EXPORT_SYMBOL(b53_br_set_stp_state);

void b53_br_fast_age(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;

	if (b53_fast_age_port(dev, port))
		dev_err(ds->dev, "fast ageing failed\n");
}
EXPORT_SYMBOL(b53_br_fast_age);

int b53_br_flags_pre(struct dsa_switch *ds, int port,
		     struct switchdev_brport_flags flags,
		     struct netlink_ext_ack *extack)
{
	if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(b53_br_flags_pre);

int b53_br_flags(struct dsa_switch *ds, int port,
		 struct switchdev_brport_flags flags,
		 struct netlink_ext_ack *extack)
{
	if (flags.mask & BR_FLOOD)
		b53_port_set_ucast_flood(ds->priv, port,
					 !!(flags.val & BR_FLOOD));
	if (flags.mask & BR_MCAST_FLOOD)
		b53_port_set_mcast_flood(ds->priv, port,
					 !!(flags.val & BR_MCAST_FLOOD));
	if (flags.mask & BR_LEARNING)
		b53_port_set_learning(ds->priv, port,
				      !!(flags.val & BR_LEARNING));

	return 0;
}
EXPORT_SYMBOL(b53_br_flags);

int b53_set_mrouter(struct dsa_switch *ds, int port, bool mrouter,
		    struct netlink_ext_ack *extack)
{
	b53_port_set_mcast_flood(ds->priv, port, mrouter);

	return 0;
}
EXPORT_SYMBOL(b53_set_mrouter);

static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
{
	/* Broadcom switches will accept enabling Broadcom tags on the
	 * following ports: 5, 7 and 8, any other port is not supported
	 */
	switch (port) {
	case B53_CPU_PORT_25:
	case 7:
	case B53_CPU_PORT:
		return true;
	}

	return false;
}

static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port,
				     enum dsa_tag_protocol tag_protocol)
{
	bool ret = b53_possible_cpu_port(ds, port);

	if (!ret) {
		dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
			 port);
static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port,
				     enum dsa_tag_protocol tag_protocol)
{
	bool ret = b53_possible_cpu_port(ds, port);

	if (!ret) {
		dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
			 port);
		return ret;
	}

	switch (tag_protocol) {
	case DSA_TAG_PROTO_BRCM:
	case DSA_TAG_PROTO_BRCM_PREPEND:
		dev_warn(ds->dev,
			 "Port %d is stacked to Broadcom tag switch\n", port);
		ret = false;
		break;
	default:
		ret = true;
		break;
	}

	return ret;
}

enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
					   enum dsa_tag_protocol mprot)
{
	struct b53_device *dev = ds->priv;

	if (!b53_can_enable_brcm_tags(ds, port, mprot)) {
		dev->tag_protocol = DSA_TAG_PROTO_NONE;
		goto out;
	}

	/* Older models require a different 6 byte tag */
	if (is5325(dev) || is5365(dev) || is63xx(dev)) {
		dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY;
		goto out;
	}

	/* Broadcom BCM58xx chips have a flow accelerator on Port 8
	 * which requires us to use the prepended Broadcom tag type
	 */
	if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) {
		dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND;
		goto out;
	}

	dev->tag_protocol = DSA_TAG_PROTO_BRCM;
out:
	return dev->tag_protocol;
}
EXPORT_SYMBOL(b53_get_tag_protocol);

int b53_mirror_add(struct dsa_switch *ds, int port,
		   struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
{
	struct b53_device *dev = ds->priv;
	u16 reg, loc;

	if (ingress)
		loc = B53_IG_MIR_CTL;
	else
		loc = B53_EG_MIR_CTL;

	b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
	reg |= BIT(port);
	b53_write16(dev, B53_MGMT_PAGE, loc, reg);

	b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
	reg &= ~CAP_PORT_MASK;
	reg |= mirror->to_local_port;
	reg |= MIRROR_EN;
	b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);

	return 0;
}
EXPORT_SYMBOL(b53_mirror_add);

void b53_mirror_del(struct dsa_switch *ds, int port,
		    struct dsa_mall_mirror_tc_entry *mirror)
{
	struct b53_device *dev = ds->priv;
	bool loc_disable = false, other_loc_disable = false;
	u16 reg, loc;

	if (mirror->ingress)
		loc = B53_IG_MIR_CTL;
	else
		loc = B53_EG_MIR_CTL;

	/* Update the desired ingress/egress register */
	b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
	reg &= ~BIT(port);
	if (!(reg & MIRROR_MASK))
		loc_disable = true;
	b53_write16(dev, B53_MGMT_PAGE, loc, reg);

	/* Now look at the other one to know if we can disable mirroring
	 * entirely
	 */
	if (mirror->ingress)
		b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, &reg);
	else
		b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg);
	if (!(reg & MIRROR_MASK))
		other_loc_disable = true;

	b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
	/* Both no longer have ports, let's disable mirroring */
	if (loc_disable && other_loc_disable) {
		reg &= ~MIRROR_EN;
		reg &= ~mirror->to_local_port;
	}
	b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
}
EXPORT_SYMBOL(b53_mirror_del);

void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
{
	struct b53_device *dev = ds->priv;
	u16 reg;

	b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
	if (enable)
		reg |= BIT(port);
	else
		reg &= ~BIT(port);
	b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
}
EXPORT_SYMBOL(b53_eee_enable_set);
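
/* EEE state is cached per port in dev->ports[port].eee: the hardware only
 * reports LPI (low power idle) status, so the enabled flag mirrors the last
 * value configured through ethtool.
 */
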
/* Returns 0 if EEE was not enabled, or 1 otherwise
 */
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
{
	int ret;

	ret = phy_init_eee(phy, 0);
	if (ret)
		return 0;

	b53_eee_enable_set(ds, port, true);

	return 1;
}
EXPORT_SYMBOL(b53_eee_init);

int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	struct b53_device *dev = ds->priv;
	struct ethtool_eee *p = &dev->ports[port].eee;
	u16 reg;

	if (is5325(dev) || is5365(dev))
		return -EOPNOTSUPP;

	b53_read16(dev, B53_EEE_PAGE, B53_EEE_LPI_INDICATE, &reg);
	e->eee_enabled = p->eee_enabled;
	e->eee_active = !!(reg & BIT(port));

	return 0;
}
EXPORT_SYMBOL(b53_get_mac_eee);

int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	struct b53_device *dev = ds->priv;
	struct ethtool_eee *p = &dev->ports[port].eee;

	if (is5325(dev) || is5365(dev))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;
	b53_eee_enable_set(ds, port, e->eee_enabled);

	return 0;
}
EXPORT_SYMBOL(b53_set_mac_eee);

static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
{
	struct b53_device *dev = ds->priv;
	bool enable_jumbo;
	bool allow_10_100;

	if (is5325(dev) || is5365(dev))
		return -EOPNOTSUPP;

	enable_jumbo = (mtu >= JMS_MIN_SIZE);
	allow_10_100 = (dev->chip_id == BCM583XX_DEVICE_ID);

	return b53_set_jumbo(dev, enable_jumbo, allow_10_100);
}

static int b53_get_max_mtu(struct dsa_switch *ds, int port)
{
	return JMS_MAX_SIZE;
}

static const struct dsa_switch_ops b53_switch_ops = {
	.get_tag_protocol = b53_get_tag_protocol,
	.setup = b53_setup,
	.teardown = b53_teardown,
	.get_strings = b53_get_strings,
	.get_ethtool_stats = b53_get_ethtool_stats,
	.get_sset_count = b53_get_sset_count,
	.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
	.phy_read = b53_phy_read16,
	.phy_write = b53_phy_write16,
	.adjust_link = b53_adjust_link,
	.phylink_validate = b53_phylink_validate,
	.phylink_mac_link_state = b53_phylink_mac_link_state,
	.phylink_mac_config = b53_phylink_mac_config,
	.phylink_mac_an_restart = b53_phylink_mac_an_restart,
	.phylink_mac_link_down = b53_phylink_mac_link_down,
	.phylink_mac_link_up = b53_phylink_mac_link_up,
	.port_enable = b53_enable_port,
	.port_disable = b53_disable_port,
	.get_mac_eee = b53_get_mac_eee,
	.set_mac_eee = b53_set_mac_eee,
	.port_bridge_join = b53_br_join,
	.port_bridge_leave = b53_br_leave,
	.port_pre_bridge_flags = b53_br_flags_pre,
	.port_bridge_flags = b53_br_flags,
	.port_set_mrouter = b53_set_mrouter,
	.port_stp_state_set = b53_br_set_stp_state,
	.port_fast_age = b53_br_fast_age,
	.port_vlan_filtering = b53_vlan_filtering,
	.port_vlan_add = b53_vlan_add,
	.port_vlan_del = b53_vlan_del,
	.port_fdb_dump = b53_fdb_dump,
	.port_fdb_add = b53_fdb_add,
	.port_fdb_del = b53_fdb_del,
	.port_mirror_add = b53_mirror_add,
	.port_mirror_del = b53_mirror_del,
	.port_mdb_add = b53_mdb_add,
	.port_mdb_del = b53_mdb_del,
	.port_max_mtu = b53_get_max_mtu,
	.port_change_mtu = b53_change_mtu,
};
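
/* Static description of each supported switch: port map, CPU port location,
 * VLAN/ARL table sizes and the per-family register layout used for duplex
 * status, jumbo frame control and VLAN table access.
 */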
struct b53_chip_data {
	u32 chip_id;
	const char *dev_name;
	u16 vlans;
	u16 enabled_ports;
	u8 cpu_port;
	u8 vta_regs[3];
	u8 arl_bins;
	u16 arl_buckets;
	u8 duplex_reg;
	u8 jumbo_pm_reg;
	u8 jumbo_size_reg;
};

#define B53_VTA_REGS \
	{ B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY }
#define B53_VTA_REGS_9798 \
	{ B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 }
#define B53_VTA_REGS_63XX \
	{ B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX }

static const struct b53_chip_data b53_switch_chips[] = {
	{
		.chip_id = BCM5325_DEVICE_ID,
		.dev_name = "BCM5325",
		.vlans = 16,
		.enabled_ports = 0x1f,
		.arl_bins = 2,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT_25,
		.duplex_reg = B53_DUPLEX_STAT_FE,
	},
	{
		.chip_id = BCM5365_DEVICE_ID,
		.dev_name = "BCM5365",
		.vlans = 256,
		.enabled_ports = 0x1f,
		.arl_bins = 2,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT_25,
		.duplex_reg = B53_DUPLEX_STAT_FE,
	},
	{
		.chip_id = BCM5389_DEVICE_ID,
		.dev_name = "BCM5389",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM5395_DEVICE_ID,
		.dev_name = "BCM5395",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM5397_DEVICE_ID,
		.dev_name = "BCM5397",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS_9798,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM5398_DEVICE_ID,
		.dev_name = "BCM5398",
		.vlans = 4096,
		.enabled_ports = 0x7f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS_9798,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53115_DEVICE_ID,
		.dev_name = "BCM53115",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.vta_regs = B53_VTA_REGS,
		.cpu_port = B53_CPU_PORT,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53125_DEVICE_ID,
		.dev_name = "BCM53125",
		.vlans = 4096,
		.enabled_ports = 0xff,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53128_DEVICE_ID,
		.dev_name = "BCM53128",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM63XX_DEVICE_ID,
		.dev_name = "BCM63xx",
		.vlans = 4096,
		.enabled_ports = 0, /* pdata must provide them */
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS_63XX,
		.duplex_reg = B53_DUPLEX_STAT_63XX,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
	},
	{
		.chip_id = BCM53010_DEVICE_ID,
		.dev_name = "BCM53010",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53011_DEVICE_ID,
		.dev_name = "BCM53011",
		.vlans = 4096,
		.enabled_ports = 0x1bf,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53012_DEVICE_ID,
		.dev_name = "BCM53012",
		.vlans = 4096,
		.enabled_ports = 0x1bf,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53018_DEVICE_ID,
		.dev_name = "BCM53018",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53019_DEVICE_ID,
		.dev_name = "BCM53019",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM58XX_DEVICE_ID,
		.dev_name = "BCM585xx/586xx/88312",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM583XX_DEVICE_ID,
		.dev_name = "BCM583xx/11360",
		.vlans = 4096,
		.enabled_ports = 0x103,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	/* Starfighter 2 */
	{
		.chip_id = BCM4908_DEVICE_ID,
		.dev_name = "BCM4908",
		.vlans = 4096,
		.enabled_ports = 0x1bf,
		.arl_bins = 4,
		.arl_buckets = 256,
		.cpu_port = 8, /* TODO: ports 4, 5, 8 */
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM7445_DEVICE_ID,
		.dev_name = "BCM7445",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM7278_DEVICE_ID,
		.dev_name = "BCM7278",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_bins = 4,
		.arl_buckets = 256,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
};

static int b53_switch_init(struct b53_device *dev)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
		const struct b53_chip_data *chip = &b53_switch_chips[i];

		if (chip->chip_id == dev->chip_id) {
			if (!dev->enabled_ports)
				dev->enabled_ports = chip->enabled_ports;
			dev->name = chip->dev_name;
			dev->duplex_reg = chip->duplex_reg;
			dev->vta_regs[0] = chip->vta_regs[0];
			dev->vta_regs[1] = chip->vta_regs[1];
			dev->vta_regs[2] = chip->vta_regs[2];
			dev->jumbo_pm_reg = chip->jumbo_pm_reg;
			dev->cpu_port = chip->cpu_port;
			dev->num_vlans = chip->vlans;
			dev->num_arl_bins = chip->arl_bins;
			dev->num_arl_buckets = chip->arl_buckets;
			break;
		}
	}

	/* check which BCM5325x version we have */
	if (is5325(dev)) {
		u8 vc4;

		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);

		/* check reserved bits */
		switch (vc4 & 3) {
		case 1:
			/* BCM5325E */
			break;
		case 3:
			/* BCM5325F - do not use port 4 */
			dev->enabled_ports &= ~BIT(4);
			break;
		default:
/* On the BCM47XX SoCs this is the supported internal switch. */
#ifndef CONFIG_BCM47XX
			/* BCM5325M */
			return -EINVAL;
#else
			break;
#endif
		}
	} else if (dev->chip_id == BCM53115_DEVICE_ID) {
		u64 strap_value;

		b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value);
		/* use second IMP port if GMII is enabled */
		if (strap_value & SV_GMII_CTRL_115)
			dev->cpu_port = 5;
	}

	/* cpu port is always last */
	dev->num_ports = dev->cpu_port + 1;
	dev->enabled_ports |= BIT(dev->cpu_port);

	/* Include non standard CPU port built-in PHYs to be probed */
	if (is539x(dev) || is531x5(dev)) {
		for (i = 0; i < dev->num_ports; i++) {
			if (!(dev->ds->phys_mii_mask & BIT(i)) &&
			    !b53_possible_cpu_port(dev->ds, i))
				dev->ds->phys_mii_mask |= BIT(i);
		}
	}

	dev->ports = devm_kcalloc(dev->dev,
				  dev->num_ports, sizeof(struct b53_port),
				  GFP_KERNEL);
	if (!dev->ports)
		return -ENOMEM;

	dev->vlans = devm_kcalloc(dev->dev,
				  dev->num_vlans, sizeof(struct b53_vlan),
				  GFP_KERNEL);
	if (!dev->vlans)
		return -ENOMEM;

	dev->reset_gpio = b53_switch_get_reset_gpio(dev);
	if (dev->reset_gpio >= 0) {
		ret = devm_gpio_request_one(dev->dev, dev->reset_gpio,
					    GPIOF_OUT_INIT_HIGH, "robo_reset");
		if (ret)
			return ret;
	}

	return 0;
}
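
/* Allocate a b53_device together with its dsa_switch and wire up the
 * bus-specific I/O operations. The caller is expected to set or detect the
 * chip ID and then call b53_switch_register() to register with DSA.
 */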
struct b53_device *b53_switch_alloc(struct device *base,
				    const struct b53_io_ops *ops,
				    void *priv)
{
	struct dsa_switch *ds;
	struct b53_device *dev;

	ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	ds->dev = base;
	ds->num_ports = DSA_MAX_PORTS;

	dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	ds->priv = dev;
	dev->dev = base;

	dev->ds = ds;
	dev->priv = priv;
	dev->ops = ops;
	ds->ops = &b53_switch_ops;
	dev->vlan_enabled = true;
	/* Let DSA handle the case where multiple bridges span the same switch
	 * device and different VLAN awareness settings are requested, which
	 * would be breaking filtering semantics for any of the other bridge
	 * devices. (not hardware supported)
	 */
	ds->vlan_filtering_is_global = true;

	mutex_init(&dev->reg_mutex);
	mutex_init(&dev->stats_mutex);

	return dev;
}
EXPORT_SYMBOL(b53_switch_alloc);

int b53_switch_detect(struct b53_device *dev)
{
	u32 id32;
	u16 tmp;
	u8 id8;
	int ret;

	ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8);
	if (ret)
		return ret;

	switch (id8) {
	case 0:
		/* BCM5325 and BCM5365 do not have this register so reads
		 * return 0. But the read operation did succeed, so assume this
		 * is one of them.
		 *
		 * Next check if we can write to the 5325's VTA register; for
		 * 5365 it is read only.
		 */
		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
		b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);

		if (tmp == 0xf)
			dev->chip_id = BCM5325_DEVICE_ID;
		else
			dev->chip_id = BCM5365_DEVICE_ID;
		break;
	case BCM5389_DEVICE_ID:
	case BCM5395_DEVICE_ID:
	case BCM5397_DEVICE_ID:
	case BCM5398_DEVICE_ID:
		dev->chip_id = id8;
		break;
	default:
		ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32);
		if (ret)
			return ret;

		switch (id32) {
		case BCM53115_DEVICE_ID:
		case BCM53125_DEVICE_ID:
		case BCM53128_DEVICE_ID:
		case BCM53010_DEVICE_ID:
		case BCM53011_DEVICE_ID:
		case BCM53012_DEVICE_ID:
		case BCM53018_DEVICE_ID:
		case BCM53019_DEVICE_ID:
			dev->chip_id = id32;
			break;
		default:
			dev_err(dev->dev,
				"unsupported switch detected (BCM53%02x/BCM%x)\n",
				id8, id32);
			return -ENODEV;
		}
	}

	if (dev->chip_id == BCM5325_DEVICE_ID)
		return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25,
				 &dev->core_rev);
	else
		return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID,
				 &dev->core_rev);
}
EXPORT_SYMBOL(b53_switch_detect);

int b53_switch_register(struct b53_device *dev)
{
	int ret;

	if (dev->pdata) {
		dev->chip_id = dev->pdata->chip_id;
		dev->enabled_ports = dev->pdata->enabled_ports;
	}

	if (!dev->chip_id && b53_switch_detect(dev))
		return -EINVAL;

	ret = b53_switch_init(dev);
	if (ret)
		return ret;

	dev_info(dev->dev, "found switch: %s, rev %i\n",
		 dev->name, dev->core_rev);

	return dsa_register_switch(dev->ds);
}
EXPORT_SYMBOL(b53_switch_register);

MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
MODULE_DESCRIPTION("B53 switch library");
MODULE_LICENSE("Dual BSD/GPL");