/*
 * B53 switch driver main logic
 *
 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
 * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_data/b53.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <net/dsa.h>

#include "b53_regs.h"
#include "b53_priv.h"

struct b53_mib_desc {
	u8 size;
	u8 offset;
	const char *name;
};

/* BCM5365 MIB counters */
static const struct b53_mib_desc b53_mibs_65[] = {
	{ 8, 0x00, "TxOctets" },
	{ 4, 0x08, "TxDropPkts" },
	{ 4, 0x10, "TxBroadcastPkts" },
	{ 4, 0x14, "TxMulticastPkts" },
	{ 4, 0x18, "TxUnicastPkts" },
	{ 4, 0x1c, "TxCollisions" },
	{ 4, 0x20, "TxSingleCollision" },
	{ 4, 0x24, "TxMultipleCollision" },
	{ 4, 0x28, "TxDeferredTransmit" },
	{ 4, 0x2c, "TxLateCollision" },
	{ 4, 0x30, "TxExcessiveCollision" },
	{ 4, 0x38, "TxPausePkts" },
	{ 8, 0x44, "RxOctets" },
	{ 4, 0x4c, "RxUndersizePkts" },
	{ 4, 0x50, "RxPausePkts" },
	{ 4, 0x54, "Pkts64Octets" },
	{ 4, 0x58, "Pkts65to127Octets" },
	{ 4, 0x5c, "Pkts128to255Octets" },
	{ 4, 0x60, "Pkts256to511Octets" },
	{ 4, 0x64, "Pkts512to1023Octets" },
	{ 4, 0x68, "Pkts1024to1522Octets" },
	{ 4, 0x6c, "RxOversizePkts" },
	{ 4, 0x70, "RxJabbers" },
	{ 4, 0x74, "RxAlignmentErrors" },
	{ 4, 0x78, "RxFCSErrors" },
	{ 8, 0x7c, "RxGoodOctets" },
	{ 4, 0x84, "RxDropPkts" },
	{ 4, 0x88, "RxUnicastPkts" },
	{ 4, 0x8c, "RxMulticastPkts" },
	{ 4, 0x90, "RxBroadcastPkts" },
	{ 4, 0x94, "RxSAChanges" },
	{ 4, 0x98, "RxFragments" },
};

#define B53_MIBS_65_SIZE	ARRAY_SIZE(b53_mibs_65)

/* BCM63xx MIB counters */
static const struct b53_mib_desc b53_mibs_63xx[] = {
	{ 8, 0x00, "TxOctets" },
	{ 4, 0x08, "TxDropPkts" },
	{ 4, 0x0c, "TxQoSPkts" },
	{ 4, 0x10, "TxBroadcastPkts" },
	{ 4, 0x14, "TxMulticastPkts" },
	{ 4, 0x18, "TxUnicastPkts" },
	{ 4, 0x1c, "TxCollisions" },
	{ 4, 0x20, "TxSingleCollision" },
	{ 4, 0x24, "TxMultipleCollision" },
	{ 4, 0x28, "TxDeferredTransmit" },
	{ 4, 0x2c, "TxLateCollision" },
	{ 4, 0x30, "TxExcessiveCollision" },
	{ 4, 0x38, "TxPausePkts" },
	{ 8, 0x3c, "TxQoSOctets" },
	{ 8, 0x44, "RxOctets" },
	{ 4, 0x4c, "RxUndersizePkts" },
	{ 4, 0x50, "RxPausePkts" },
	{ 4, 0x54, "Pkts64Octets" },
	{ 4, 0x58, "Pkts65to127Octets" },
	{ 4, 0x5c, "Pkts128to255Octets" },
	{ 4, 0x60, "Pkts256to511Octets" },
	{ 4, 0x64, "Pkts512to1023Octets" },
	{ 4, 0x68,
"Pkts1024to1522Octets" }, 106 { 4, 0x6c, "RxOversizePkts" }, 107 { 4, 0x70, "RxJabbers" }, 108 { 4, 0x74, "RxAlignmentErrors" }, 109 { 4, 0x78, "RxFCSErrors" }, 110 { 8, 0x7c, "RxGoodOctets" }, 111 { 4, 0x84, "RxDropPkts" }, 112 { 4, 0x88, "RxUnicastPkts" }, 113 { 4, 0x8c, "RxMulticastPkts" }, 114 { 4, 0x90, "RxBroadcastPkts" }, 115 { 4, 0x94, "RxSAChanges" }, 116 { 4, 0x98, "RxFragments" }, 117 { 4, 0xa0, "RxSymbolErrors" }, 118 { 4, 0xa4, "RxQoSPkts" }, 119 { 8, 0xa8, "RxQoSOctets" }, 120 { 4, 0xb0, "Pkts1523to2047Octets" }, 121 { 4, 0xb4, "Pkts2048to4095Octets" }, 122 { 4, 0xb8, "Pkts4096to8191Octets" }, 123 { 4, 0xbc, "Pkts8192to9728Octets" }, 124 { 4, 0xc0, "RxDiscarded" }, 125 }; 126 127 #define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx) 128 129 /* MIB counters */ 130 static const struct b53_mib_desc b53_mibs[] = { 131 { 8, 0x00, "TxOctets" }, 132 { 4, 0x08, "TxDropPkts" }, 133 { 4, 0x10, "TxBroadcastPkts" }, 134 { 4, 0x14, "TxMulticastPkts" }, 135 { 4, 0x18, "TxUnicastPkts" }, 136 { 4, 0x1c, "TxCollisions" }, 137 { 4, 0x20, "TxSingleCollision" }, 138 { 4, 0x24, "TxMultipleCollision" }, 139 { 4, 0x28, "TxDeferredTransmit" }, 140 { 4, 0x2c, "TxLateCollision" }, 141 { 4, 0x30, "TxExcessiveCollision" }, 142 { 4, 0x38, "TxPausePkts" }, 143 { 8, 0x50, "RxOctets" }, 144 { 4, 0x58, "RxUndersizePkts" }, 145 { 4, 0x5c, "RxPausePkts" }, 146 { 4, 0x60, "Pkts64Octets" }, 147 { 4, 0x64, "Pkts65to127Octets" }, 148 { 4, 0x68, "Pkts128to255Octets" }, 149 { 4, 0x6c, "Pkts256to511Octets" }, 150 { 4, 0x70, "Pkts512to1023Octets" }, 151 { 4, 0x74, "Pkts1024to1522Octets" }, 152 { 4, 0x78, "RxOversizePkts" }, 153 { 4, 0x7c, "RxJabbers" }, 154 { 4, 0x80, "RxAlignmentErrors" }, 155 { 4, 0x84, "RxFCSErrors" }, 156 { 8, 0x88, "RxGoodOctets" }, 157 { 4, 0x90, "RxDropPkts" }, 158 { 4, 0x94, "RxUnicastPkts" }, 159 { 4, 0x98, "RxMulticastPkts" }, 160 { 4, 0x9c, "RxBroadcastPkts" }, 161 { 4, 0xa0, "RxSAChanges" }, 162 { 4, 0xa4, "RxFragments" }, 163 { 4, 0xa8, "RxJumboPkts" }, 164 { 4, 0xac, "RxSymbolErrors" }, 165 { 4, 0xc0, "RxDiscarded" }, 166 }; 167 168 #define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs) 169 170 static const struct b53_mib_desc b53_mibs_58xx[] = { 171 { 8, 0x00, "TxOctets" }, 172 { 4, 0x08, "TxDropPkts" }, 173 { 4, 0x0c, "TxQPKTQ0" }, 174 { 4, 0x10, "TxBroadcastPkts" }, 175 { 4, 0x14, "TxMulticastPkts" }, 176 { 4, 0x18, "TxUnicastPKts" }, 177 { 4, 0x1c, "TxCollisions" }, 178 { 4, 0x20, "TxSingleCollision" }, 179 { 4, 0x24, "TxMultipleCollision" }, 180 { 4, 0x28, "TxDeferredCollision" }, 181 { 4, 0x2c, "TxLateCollision" }, 182 { 4, 0x30, "TxExcessiveCollision" }, 183 { 4, 0x34, "TxFrameInDisc" }, 184 { 4, 0x38, "TxPausePkts" }, 185 { 4, 0x3c, "TxQPKTQ1" }, 186 { 4, 0x40, "TxQPKTQ2" }, 187 { 4, 0x44, "TxQPKTQ3" }, 188 { 4, 0x48, "TxQPKTQ4" }, 189 { 4, 0x4c, "TxQPKTQ5" }, 190 { 8, 0x50, "RxOctets" }, 191 { 4, 0x58, "RxUndersizePkts" }, 192 { 4, 0x5c, "RxPausePkts" }, 193 { 4, 0x60, "RxPkts64Octets" }, 194 { 4, 0x64, "RxPkts65to127Octets" }, 195 { 4, 0x68, "RxPkts128to255Octets" }, 196 { 4, 0x6c, "RxPkts256to511Octets" }, 197 { 4, 0x70, "RxPkts512to1023Octets" }, 198 { 4, 0x74, "RxPkts1024toMaxPktsOctets" }, 199 { 4, 0x78, "RxOversizePkts" }, 200 { 4, 0x7c, "RxJabbers" }, 201 { 4, 0x80, "RxAlignmentErrors" }, 202 { 4, 0x84, "RxFCSErrors" }, 203 { 8, 0x88, "RxGoodOctets" }, 204 { 4, 0x90, "RxDropPkts" }, 205 { 4, 0x94, "RxUnicastPkts" }, 206 { 4, 0x98, "RxMulticastPkts" }, 207 { 4, 0x9c, "RxBroadcastPkts" }, 208 { 4, 0xa0, "RxSAChanges" }, 209 { 4, 0xa4, "RxFragments" }, 210 { 4, 0xa8, "RxJumboPkt" }, 211 
{ 4, 0xac, "RxSymblErr" }, 212 { 4, 0xb0, "InRangeErrCount" }, 213 { 4, 0xb4, "OutRangeErrCount" }, 214 { 4, 0xb8, "EEELpiEvent" }, 215 { 4, 0xbc, "EEELpiDuration" }, 216 { 4, 0xc0, "RxDiscard" }, 217 { 4, 0xc8, "TxQPKTQ6" }, 218 { 4, 0xcc, "TxQPKTQ7" }, 219 { 4, 0xd0, "TxPkts64Octets" }, 220 { 4, 0xd4, "TxPkts65to127Octets" }, 221 { 4, 0xd8, "TxPkts128to255Octets" }, 222 { 4, 0xdc, "TxPkts256to511Ocets" }, 223 { 4, 0xe0, "TxPkts512to1023Ocets" }, 224 { 4, 0xe4, "TxPkts1024toMaxPktOcets" }, 225 }; 226 227 #define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx) 228 229 static int b53_do_vlan_op(struct b53_device *dev, u8 op) 230 { 231 unsigned int i; 232 233 b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op); 234 235 for (i = 0; i < 10; i++) { 236 u8 vta; 237 238 b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta); 239 if (!(vta & VTA_START_CMD)) 240 return 0; 241 242 usleep_range(100, 200); 243 } 244 245 return -EIO; 246 } 247 248 static void b53_set_vlan_entry(struct b53_device *dev, u16 vid, 249 struct b53_vlan *vlan) 250 { 251 if (is5325(dev)) { 252 u32 entry = 0; 253 254 if (vlan->members) { 255 entry = ((vlan->untag & VA_UNTAG_MASK_25) << 256 VA_UNTAG_S_25) | vlan->members; 257 if (dev->core_rev >= 3) 258 entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S; 259 else 260 entry |= VA_VALID_25; 261 } 262 263 b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry); 264 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | 265 VTA_RW_STATE_WR | VTA_RW_OP_EN); 266 } else if (is5365(dev)) { 267 u16 entry = 0; 268 269 if (vlan->members) 270 entry = ((vlan->untag & VA_UNTAG_MASK_65) << 271 VA_UNTAG_S_65) | vlan->members | VA_VALID_65; 272 273 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry); 274 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | 275 VTA_RW_STATE_WR | VTA_RW_OP_EN); 276 } else { 277 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); 278 b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], 279 (vlan->untag << VTE_UNTAG_S) | vlan->members); 280 281 b53_do_vlan_op(dev, VTA_CMD_WRITE); 282 } 283 284 dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n", 285 vid, vlan->members, vlan->untag); 286 } 287 288 static void b53_get_vlan_entry(struct b53_device *dev, u16 vid, 289 struct b53_vlan *vlan) 290 { 291 if (is5325(dev)) { 292 u32 entry = 0; 293 294 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | 295 VTA_RW_STATE_RD | VTA_RW_OP_EN); 296 b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry); 297 298 if (dev->core_rev >= 3) 299 vlan->valid = !!(entry & VA_VALID_25_R4); 300 else 301 vlan->valid = !!(entry & VA_VALID_25); 302 vlan->members = entry & VA_MEMBER_MASK; 303 vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25; 304 305 } else if (is5365(dev)) { 306 u16 entry = 0; 307 308 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | 309 VTA_RW_STATE_WR | VTA_RW_OP_EN); 310 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry); 311 312 vlan->valid = !!(entry & VA_VALID_65); 313 vlan->members = entry & VA_MEMBER_MASK; 314 vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65; 315 } else { 316 u32 entry = 0; 317 318 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); 319 b53_do_vlan_op(dev, VTA_CMD_READ); 320 b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry); 321 vlan->members = entry & VTE_MEMBERS; 322 vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS; 323 vlan->valid = true; 324 } 325 } 326 327 static void b53_set_forwarding(struct b53_device *dev, int enable) 328 { 
329 u8 mgmt; 330 331 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 332 333 if (enable) 334 mgmt |= SM_SW_FWD_EN; 335 else 336 mgmt &= ~SM_SW_FWD_EN; 337 338 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 339 340 /* Include IMP port in dumb forwarding mode 341 */ 342 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt); 343 mgmt |= B53_MII_DUMB_FWDG_EN; 344 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); 345 346 /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether 347 * frames should be flooded or not. 348 */ 349 b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); 350 mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN; 351 b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); 352 } 353 354 static void b53_enable_vlan(struct b53_device *dev, bool enable, 355 bool enable_filtering) 356 { 357 u8 mgmt, vc0, vc1, vc4 = 0, vc5; 358 359 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 360 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0); 361 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1); 362 363 if (is5325(dev) || is5365(dev)) { 364 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4); 365 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5); 366 } else if (is63xx(dev)) { 367 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4); 368 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5); 369 } else { 370 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4); 371 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5); 372 } 373 374 if (enable) { 375 vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; 376 vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; 377 vc4 &= ~VC4_ING_VID_CHECK_MASK; 378 if (enable_filtering) { 379 vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; 380 vc5 |= VC5_DROP_VTABLE_MISS; 381 } else { 382 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; 383 vc5 &= ~VC5_DROP_VTABLE_MISS; 384 } 385 386 if (is5325(dev)) 387 vc0 &= ~VC0_RESERVED_1; 388 389 if (is5325(dev) || is5365(dev)) 390 vc1 |= VC1_RX_MCST_TAG_EN; 391 392 } else { 393 vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID); 394 vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN); 395 vc4 &= ~VC4_ING_VID_CHECK_MASK; 396 vc5 &= ~VC5_DROP_VTABLE_MISS; 397 398 if (is5325(dev) || is5365(dev)) 399 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; 400 else 401 vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S; 402 403 if (is5325(dev) || is5365(dev)) 404 vc1 &= ~VC1_RX_MCST_TAG_EN; 405 } 406 407 if (!is5325(dev) && !is5365(dev)) 408 vc5 &= ~VC5_VID_FFF_EN; 409 410 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0); 411 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1); 412 413 if (is5325(dev) || is5365(dev)) { 414 /* enable the high 8 bit vid check on 5325 */ 415 if (is5325(dev) && enable) 416 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 417 VC3_HIGH_8BIT_EN); 418 else 419 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); 420 421 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4); 422 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5); 423 } else if (is63xx(dev)) { 424 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0); 425 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4); 426 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5); 427 } else { 428 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); 429 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4); 430 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5); 431 } 432 433 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 434 435 dev->vlan_enabled = 
enable; 436 } 437 438 static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) 439 { 440 u32 port_mask = 0; 441 u16 max_size = JMS_MIN_SIZE; 442 443 if (is5325(dev) || is5365(dev)) 444 return -EINVAL; 445 446 if (enable) { 447 port_mask = dev->enabled_ports; 448 max_size = JMS_MAX_SIZE; 449 if (allow_10_100) 450 port_mask |= JPM_10_100_JUMBO_EN; 451 } 452 453 b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask); 454 return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size); 455 } 456 457 static int b53_flush_arl(struct b53_device *dev, u8 mask) 458 { 459 unsigned int i; 460 461 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, 462 FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask); 463 464 for (i = 0; i < 10; i++) { 465 u8 fast_age_ctrl; 466 467 b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, 468 &fast_age_ctrl); 469 470 if (!(fast_age_ctrl & FAST_AGE_DONE)) 471 goto out; 472 473 msleep(1); 474 } 475 476 return -ETIMEDOUT; 477 out: 478 /* Only age dynamic entries (default behavior) */ 479 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC); 480 return 0; 481 } 482 483 static int b53_fast_age_port(struct b53_device *dev, int port) 484 { 485 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port); 486 487 return b53_flush_arl(dev, FAST_AGE_PORT); 488 } 489 490 static int b53_fast_age_vlan(struct b53_device *dev, u16 vid) 491 { 492 b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid); 493 494 return b53_flush_arl(dev, FAST_AGE_VLAN); 495 } 496 497 void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) 498 { 499 struct b53_device *dev = ds->priv; 500 unsigned int i; 501 u16 pvlan; 502 503 /* Enable the IMP port to be in the same VLAN as the other ports 504 * on a per-port basis such that we only have Port i and IMP in 505 * the same VLAN. 506 */ 507 b53_for_each_port(dev, i) { 508 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan); 509 pvlan |= BIT(cpu_port); 510 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan); 511 } 512 } 513 EXPORT_SYMBOL(b53_imp_vlan_setup); 514 515 int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) 516 { 517 struct b53_device *dev = ds->priv; 518 unsigned int cpu_port; 519 int ret = 0; 520 u16 pvlan; 521 522 if (!dsa_is_user_port(ds, port)) 523 return 0; 524 525 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 526 527 b53_br_egress_floods(ds, port, true, true); 528 529 if (dev->ops->irq_enable) 530 ret = dev->ops->irq_enable(dev, port); 531 if (ret) 532 return ret; 533 534 /* Clear the Rx and Tx disable bits and set to no spanning tree */ 535 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0); 536 537 /* Set this port, and only this one to be in the default VLAN, 538 * if member of a bridge, restore its membership prior to 539 * bringing down this port. 
540 */ 541 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 542 pvlan &= ~0x1ff; 543 pvlan |= BIT(port); 544 pvlan |= dev->ports[port].vlan_ctl_mask; 545 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 546 547 b53_imp_vlan_setup(ds, cpu_port); 548 549 /* If EEE was enabled, restore it */ 550 if (dev->ports[port].eee.eee_enabled) 551 b53_eee_enable_set(ds, port, true); 552 553 return 0; 554 } 555 EXPORT_SYMBOL(b53_enable_port); 556 557 void b53_disable_port(struct dsa_switch *ds, int port) 558 { 559 struct b53_device *dev = ds->priv; 560 u8 reg; 561 562 /* Disable Tx/Rx for the port */ 563 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), ®); 564 reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE; 565 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); 566 567 if (dev->ops->irq_disable) 568 dev->ops->irq_disable(dev, port); 569 } 570 EXPORT_SYMBOL(b53_disable_port); 571 572 void b53_brcm_hdr_setup(struct dsa_switch *ds, int port) 573 { 574 struct b53_device *dev = ds->priv; 575 bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE); 576 u8 hdr_ctl, val; 577 u16 reg; 578 579 /* Resolve which bit controls the Broadcom tag */ 580 switch (port) { 581 case 8: 582 val = BRCM_HDR_P8_EN; 583 break; 584 case 7: 585 val = BRCM_HDR_P7_EN; 586 break; 587 case 5: 588 val = BRCM_HDR_P5_EN; 589 break; 590 default: 591 val = 0; 592 break; 593 } 594 595 /* Enable management mode if tagging is requested */ 596 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl); 597 if (tag_en) 598 hdr_ctl |= SM_SW_FWD_MODE; 599 else 600 hdr_ctl &= ~SM_SW_FWD_MODE; 601 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl); 602 603 /* Configure the appropriate IMP port */ 604 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl); 605 if (port == 8) 606 hdr_ctl |= GC_FRM_MGMT_PORT_MII; 607 else if (port == 5) 608 hdr_ctl |= GC_FRM_MGMT_PORT_M; 609 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl); 610 611 /* Enable Broadcom tags for IMP port */ 612 b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl); 613 if (tag_en) 614 hdr_ctl |= val; 615 else 616 hdr_ctl &= ~val; 617 b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl); 618 619 /* Registers below are only accessible on newer devices */ 620 if (!is58xx(dev)) 621 return; 622 623 /* Enable reception Broadcom tag for CPU TX (switch RX) to 624 * allow us to tag outgoing frames 625 */ 626 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, ®); 627 if (tag_en) 628 reg &= ~BIT(port); 629 else 630 reg |= BIT(port); 631 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg); 632 633 /* Enable transmission of Broadcom tags from the switch (CPU RX) to 634 * allow delivering frames to the per-port net_devices 635 */ 636 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, ®); 637 if (tag_en) 638 reg &= ~BIT(port); 639 else 640 reg |= BIT(port); 641 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg); 642 } 643 EXPORT_SYMBOL(b53_brcm_hdr_setup); 644 645 static void b53_enable_cpu_port(struct b53_device *dev, int port) 646 { 647 u8 port_ctrl; 648 649 /* BCM5325 CPU port is at 8 */ 650 if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25) 651 port = B53_CPU_PORT; 652 653 port_ctrl = PORT_CTRL_RX_BCST_EN | 654 PORT_CTRL_RX_MCST_EN | 655 PORT_CTRL_RX_UCST_EN; 656 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl); 657 658 b53_brcm_hdr_setup(dev->ds, port); 659 660 b53_br_egress_floods(dev->ds, port, true, true); 661 } 662 663 static void b53_enable_mib(struct b53_device *dev) 664 { 665 u8 gc; 
666 667 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); 668 gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN); 669 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); 670 } 671 672 static u16 b53_default_pvid(struct b53_device *dev) 673 { 674 if (is5325(dev) || is5365(dev)) 675 return 1; 676 else 677 return 0; 678 } 679 680 int b53_configure_vlan(struct dsa_switch *ds) 681 { 682 struct b53_device *dev = ds->priv; 683 struct b53_vlan vl = { 0 }; 684 int i, def_vid; 685 686 def_vid = b53_default_pvid(dev); 687 688 /* clear all vlan entries */ 689 if (is5325(dev) || is5365(dev)) { 690 for (i = def_vid; i < dev->num_vlans; i++) 691 b53_set_vlan_entry(dev, i, &vl); 692 } else { 693 b53_do_vlan_op(dev, VTA_CMD_CLEAR); 694 } 695 696 b53_enable_vlan(dev, dev->vlan_enabled, ds->vlan_filtering); 697 698 b53_for_each_port(dev, i) 699 b53_write16(dev, B53_VLAN_PAGE, 700 B53_VLAN_PORT_DEF_TAG(i), def_vid); 701 702 if (!is5325(dev) && !is5365(dev)) 703 b53_set_jumbo(dev, dev->enable_jumbo, false); 704 705 return 0; 706 } 707 EXPORT_SYMBOL(b53_configure_vlan); 708 709 static void b53_switch_reset_gpio(struct b53_device *dev) 710 { 711 int gpio = dev->reset_gpio; 712 713 if (gpio < 0) 714 return; 715 716 /* Reset sequence: RESET low(50ms)->high(20ms) 717 */ 718 gpio_set_value(gpio, 0); 719 mdelay(50); 720 721 gpio_set_value(gpio, 1); 722 mdelay(20); 723 724 dev->current_page = 0xff; 725 } 726 727 static int b53_switch_reset(struct b53_device *dev) 728 { 729 unsigned int timeout = 1000; 730 u8 mgmt, reg; 731 732 b53_switch_reset_gpio(dev); 733 734 if (is539x(dev)) { 735 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83); 736 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00); 737 } 738 739 /* This is specific to 58xx devices here, do not use is58xx() which 740 * covers the larger Starfigther 2 family, including 7445/7278 which 741 * still use this driver as a library and need to perform the reset 742 * earlier. 743 */ 744 if (dev->chip_id == BCM58XX_DEVICE_ID || 745 dev->chip_id == BCM583XX_DEVICE_ID) { 746 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, ®); 747 reg |= SW_RST | EN_SW_RST | EN_CH_RST; 748 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg); 749 750 do { 751 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, ®); 752 if (!(reg & SW_RST)) 753 break; 754 755 usleep_range(1000, 2000); 756 } while (timeout-- > 0); 757 758 if (timeout == 0) 759 return -ETIMEDOUT; 760 } 761 762 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 763 764 if (!(mgmt & SM_SW_FWD_EN)) { 765 mgmt &= ~SM_SW_FWD_MODE; 766 mgmt |= SM_SW_FWD_EN; 767 768 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 769 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 770 771 if (!(mgmt & SM_SW_FWD_EN)) { 772 dev_err(dev->dev, "Failed to enable switch!\n"); 773 return -EINVAL; 774 } 775 } 776 777 b53_enable_mib(dev); 778 779 return b53_flush_arl(dev, FAST_AGE_STATIC); 780 } 781 782 static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg) 783 { 784 struct b53_device *priv = ds->priv; 785 u16 value = 0; 786 int ret; 787 788 if (priv->ops->phy_read16) 789 ret = priv->ops->phy_read16(priv, addr, reg, &value); 790 else 791 ret = b53_read16(priv, B53_PORT_MII_PAGE(addr), 792 reg * 2, &value); 793 794 return ret ? 
ret : value; 795 } 796 797 static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val) 798 { 799 struct b53_device *priv = ds->priv; 800 801 if (priv->ops->phy_write16) 802 return priv->ops->phy_write16(priv, addr, reg, val); 803 804 return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val); 805 } 806 807 static int b53_reset_switch(struct b53_device *priv) 808 { 809 /* reset vlans */ 810 priv->enable_jumbo = false; 811 812 memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans); 813 memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports); 814 815 priv->serdes_lane = B53_INVALID_LANE; 816 817 return b53_switch_reset(priv); 818 } 819 820 static int b53_apply_config(struct b53_device *priv) 821 { 822 /* disable switching */ 823 b53_set_forwarding(priv, 0); 824 825 b53_configure_vlan(priv->ds); 826 827 /* enable switching */ 828 b53_set_forwarding(priv, 1); 829 830 return 0; 831 } 832 833 static void b53_reset_mib(struct b53_device *priv) 834 { 835 u8 gc; 836 837 b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); 838 839 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB); 840 msleep(1); 841 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB); 842 msleep(1); 843 } 844 845 static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev) 846 { 847 if (is5365(dev)) 848 return b53_mibs_65; 849 else if (is63xx(dev)) 850 return b53_mibs_63xx; 851 else if (is58xx(dev)) 852 return b53_mibs_58xx; 853 else 854 return b53_mibs; 855 } 856 857 static unsigned int b53_get_mib_size(struct b53_device *dev) 858 { 859 if (is5365(dev)) 860 return B53_MIBS_65_SIZE; 861 else if (is63xx(dev)) 862 return B53_MIBS_63XX_SIZE; 863 else if (is58xx(dev)) 864 return B53_MIBS_58XX_SIZE; 865 else 866 return B53_MIBS_SIZE; 867 } 868 869 static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port) 870 { 871 /* These ports typically do not have built-in PHYs */ 872 switch (port) { 873 case B53_CPU_PORT_25: 874 case 7: 875 case B53_CPU_PORT: 876 return NULL; 877 } 878 879 return mdiobus_get_phy(ds->slave_mii_bus, port); 880 } 881 882 void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset, 883 uint8_t *data) 884 { 885 struct b53_device *dev = ds->priv; 886 const struct b53_mib_desc *mibs = b53_get_mib(dev); 887 unsigned int mib_size = b53_get_mib_size(dev); 888 struct phy_device *phydev; 889 unsigned int i; 890 891 if (stringset == ETH_SS_STATS) { 892 for (i = 0; i < mib_size; i++) 893 strlcpy(data + i * ETH_GSTRING_LEN, 894 mibs[i].name, ETH_GSTRING_LEN); 895 } else if (stringset == ETH_SS_PHY_STATS) { 896 phydev = b53_get_phy_device(ds, port); 897 if (!phydev) 898 return; 899 900 phy_ethtool_get_strings(phydev, data); 901 } 902 } 903 EXPORT_SYMBOL(b53_get_strings); 904 905 void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) 906 { 907 struct b53_device *dev = ds->priv; 908 const struct b53_mib_desc *mibs = b53_get_mib(dev); 909 unsigned int mib_size = b53_get_mib_size(dev); 910 const struct b53_mib_desc *s; 911 unsigned int i; 912 u64 val = 0; 913 914 if (is5365(dev) && port == 5) 915 port = 8; 916 917 mutex_lock(&dev->stats_mutex); 918 919 for (i = 0; i < mib_size; i++) { 920 s = &mibs[i]; 921 922 if (s->size == 8) { 923 b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val); 924 } else { 925 u32 val32; 926 927 b53_read32(dev, B53_MIB_PAGE(port), s->offset, 928 &val32); 929 val = val32; 930 } 931 data[i] = (u64)val; 932 } 933 934 mutex_unlock(&dev->stats_mutex); 935 } 936 
EXPORT_SYMBOL(b53_get_ethtool_stats);

void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
	struct phy_device *phydev;

	phydev = b53_get_phy_device(ds, port);
	if (!phydev)
		return;

	phy_ethtool_get_stats(phydev, NULL, data);
}
EXPORT_SYMBOL(b53_get_ethtool_phy_stats);

int b53_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct b53_device *dev = ds->priv;
	struct phy_device *phydev;

	if (sset == ETH_SS_STATS) {
		return b53_get_mib_size(dev);
	} else if (sset == ETH_SS_PHY_STATS) {
		phydev = b53_get_phy_device(ds, port);
		if (!phydev)
			return 0;

		return phy_ethtool_get_sset_count(phydev);
	}

	return 0;
}
EXPORT_SYMBOL(b53_get_sset_count);

static int b53_setup(struct dsa_switch *ds)
{
	struct b53_device *dev = ds->priv;
	unsigned int port;
	int ret;

	ret = b53_reset_switch(dev);
	if (ret) {
		dev_err(ds->dev, "failed to reset switch\n");
		return ret;
	}

	b53_reset_mib(dev);

	ret = b53_apply_config(dev);
	if (ret)
		dev_err(ds->dev, "failed to apply configuration\n");

	/* Configure IMP/CPU port, disable all other ports. Enabled
	 * ports will be configured with .port_enable
	 */
	for (port = 0; port < dev->num_ports; port++) {
		if (dsa_is_cpu_port(ds, port))
			b53_enable_cpu_port(dev, port);
		else
			b53_disable_port(ds, port);
	}

	/* Let DSA handle the case where multiple bridges span the same switch
	 * device and different VLAN awareness settings are requested, which
	 * would be breaking filtering semantics for any of the other bridge
	 * devices. (not hardware supported)
	 */
	ds->vlan_filtering_is_global = true;

	return ret;
}

static void b53_force_link(struct b53_device *dev, int port, int link)
{
	u8 reg, val, off;

	/* Override the port settings */
	if (port == dev->cpu_port) {
		off = B53_PORT_OVERRIDE_CTRL;
		val = PORT_OVERRIDE_EN;
	} else {
		off = B53_GMII_PORT_OVERRIDE_CTRL(port);
		val = GMII_PO_EN;
	}

	b53_read8(dev, B53_CTRL_PAGE, off, &reg);
	reg |= val;
	if (link)
		reg |= PORT_OVERRIDE_LINK;
	else
		reg &= ~PORT_OVERRIDE_LINK;
	b53_write8(dev, B53_CTRL_PAGE, off, reg);
}

static void b53_force_port_config(struct b53_device *dev, int port,
				  int speed, int duplex, int pause)
{
	u8 reg, val, off;

	/* Override the port settings */
	if (port == dev->cpu_port) {
		off = B53_PORT_OVERRIDE_CTRL;
		val = PORT_OVERRIDE_EN;
	} else {
		off = B53_GMII_PORT_OVERRIDE_CTRL(port);
		val = GMII_PO_EN;
	}

	b53_read8(dev, B53_CTRL_PAGE, off, &reg);
	reg |= val;
	if (duplex == DUPLEX_FULL)
		reg |= PORT_OVERRIDE_FULL_DUPLEX;
	else
		reg &= ~PORT_OVERRIDE_FULL_DUPLEX;

	switch (speed) {
	case 2000:
		reg |= PORT_OVERRIDE_SPEED_2000M;
		/* fallthrough */
	case SPEED_1000:
		reg |= PORT_OVERRIDE_SPEED_1000M;
		break;
	case SPEED_100:
		reg |= PORT_OVERRIDE_SPEED_100M;
		break;
	case SPEED_10:
		reg |= PORT_OVERRIDE_SPEED_10M;
		break;
	default:
		dev_err(dev->dev, "unknown speed: %d\n", speed);
		return;
	}

	if (pause & MLO_PAUSE_RX)
		reg |= PORT_OVERRIDE_RX_FLOW;
	if (pause & MLO_PAUSE_TX)
		reg |= PORT_OVERRIDE_TX_FLOW;

	b53_write8(dev, B53_CTRL_PAGE, off, reg);
}
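
/* Illustrative sketch only, not part of the driver: how the two override
 * helpers above compose to pin a CPU/IMP port to a fixed 1 Gb/s full-duplex
 * link with symmetric pause. This mirrors what b53_adjust_link() below does
 * for the BCM5301x CPU port (there at 2 Gb/s); the function name is made up
 * for the example and nothing calls it.
 */
static void __maybe_unused b53_example_fixed_cpu_link(struct b53_device *dev)
{
	/* Program speed/duplex/pause in the port override register ... */
	b53_force_port_config(dev, dev->cpu_port, SPEED_1000, DUPLEX_FULL,
			      MLO_PAUSE_RX | MLO_PAUSE_TX);
	/* ... then assert the forced link bit */
	b53_force_link(dev, dev->cpu_port, 1);
}
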
1076 static void b53_adjust_link(struct dsa_switch *ds, int port, 1077 struct phy_device *phydev) 1078 { 1079 struct b53_device *dev = ds->priv; 1080 struct ethtool_eee *p = &dev->ports[port].eee; 1081 u8 rgmii_ctrl = 0, reg = 0, off; 1082 int pause = 0; 1083 1084 if (!phy_is_pseudo_fixed_link(phydev)) 1085 return; 1086 1087 /* Enable flow control on BCM5301x's CPU port */ 1088 if (is5301x(dev) && port == dev->cpu_port) 1089 pause = MLO_PAUSE_TXRX_MASK; 1090 1091 if (phydev->pause) { 1092 if (phydev->asym_pause) 1093 pause |= MLO_PAUSE_TX; 1094 pause |= MLO_PAUSE_RX; 1095 } 1096 1097 b53_force_port_config(dev, port, phydev->speed, phydev->duplex, pause); 1098 b53_force_link(dev, port, phydev->link); 1099 1100 if (is531x5(dev) && phy_interface_is_rgmii(phydev)) { 1101 if (port == 8) 1102 off = B53_RGMII_CTRL_IMP; 1103 else 1104 off = B53_RGMII_CTRL_P(port); 1105 1106 /* Configure the port RGMII clock delay by DLL disabled and 1107 * tx_clk aligned timing (restoring to reset defaults) 1108 */ 1109 b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl); 1110 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC | 1111 RGMII_CTRL_TIMING_SEL); 1112 1113 /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make 1114 * sure that we enable the port TX clock internal delay to 1115 * account for this internal delay that is inserted, otherwise 1116 * the switch won't be able to receive correctly. 1117 * 1118 * PHY_INTERFACE_MODE_RGMII means that we are not introducing 1119 * any delay neither on transmission nor reception, so the 1120 * BCM53125 must also be configured accordingly to account for 1121 * the lack of delay and introduce 1122 * 1123 * The BCM53125 switch has its RX clock and TX clock control 1124 * swapped, hence the reason why we modify the TX clock path in 1125 * the "RGMII" case 1126 */ 1127 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) 1128 rgmii_ctrl |= RGMII_CTRL_DLL_TXC; 1129 if (phydev->interface == PHY_INTERFACE_MODE_RGMII) 1130 rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC; 1131 rgmii_ctrl |= RGMII_CTRL_TIMING_SEL; 1132 b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl); 1133 1134 dev_info(ds->dev, "Configured port %d for %s\n", port, 1135 phy_modes(phydev->interface)); 1136 } 1137 1138 /* configure MII port if necessary */ 1139 if (is5325(dev)) { 1140 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1141 ®); 1142 1143 /* reverse mii needs to be enabled */ 1144 if (!(reg & PORT_OVERRIDE_RV_MII_25)) { 1145 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1146 reg | PORT_OVERRIDE_RV_MII_25); 1147 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1148 ®); 1149 1150 if (!(reg & PORT_OVERRIDE_RV_MII_25)) { 1151 dev_err(ds->dev, 1152 "Failed to enable reverse MII mode\n"); 1153 return; 1154 } 1155 } 1156 } else if (is5301x(dev)) { 1157 if (port != dev->cpu_port) { 1158 b53_force_port_config(dev, dev->cpu_port, 2000, 1159 DUPLEX_FULL, MLO_PAUSE_TXRX_MASK); 1160 b53_force_link(dev, dev->cpu_port, 1); 1161 } 1162 } 1163 1164 /* Re-negotiate EEE if it was enabled already */ 1165 p->eee_enabled = b53_eee_init(ds, port, phydev); 1166 } 1167 1168 void b53_port_event(struct dsa_switch *ds, int port) 1169 { 1170 struct b53_device *dev = ds->priv; 1171 bool link; 1172 u16 sts; 1173 1174 b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts); 1175 link = !!(sts & BIT(port)); 1176 dsa_port_phylink_mac_change(ds, port, link); 1177 } 1178 EXPORT_SYMBOL(b53_port_event); 1179 1180 void b53_phylink_validate(struct dsa_switch *ds, int port, 1181 unsigned long *supported, 
1182 struct phylink_link_state *state) 1183 { 1184 struct b53_device *dev = ds->priv; 1185 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 1186 1187 if (dev->ops->serdes_phylink_validate) 1188 dev->ops->serdes_phylink_validate(dev, port, mask, state); 1189 1190 /* Allow all the expected bits */ 1191 phylink_set(mask, Autoneg); 1192 phylink_set_port_modes(mask); 1193 phylink_set(mask, Pause); 1194 phylink_set(mask, Asym_Pause); 1195 1196 /* With the exclusion of 5325/5365, MII, Reverse MII and 802.3z, we 1197 * support Gigabit, including Half duplex. 1198 */ 1199 if (state->interface != PHY_INTERFACE_MODE_MII && 1200 state->interface != PHY_INTERFACE_MODE_REVMII && 1201 !phy_interface_mode_is_8023z(state->interface) && 1202 !(is5325(dev) || is5365(dev))) { 1203 phylink_set(mask, 1000baseT_Full); 1204 phylink_set(mask, 1000baseT_Half); 1205 } 1206 1207 if (!phy_interface_mode_is_8023z(state->interface)) { 1208 phylink_set(mask, 10baseT_Half); 1209 phylink_set(mask, 10baseT_Full); 1210 phylink_set(mask, 100baseT_Half); 1211 phylink_set(mask, 100baseT_Full); 1212 } 1213 1214 bitmap_and(supported, supported, mask, 1215 __ETHTOOL_LINK_MODE_MASK_NBITS); 1216 bitmap_and(state->advertising, state->advertising, mask, 1217 __ETHTOOL_LINK_MODE_MASK_NBITS); 1218 1219 phylink_helper_basex_speed(state); 1220 } 1221 EXPORT_SYMBOL(b53_phylink_validate); 1222 1223 int b53_phylink_mac_link_state(struct dsa_switch *ds, int port, 1224 struct phylink_link_state *state) 1225 { 1226 struct b53_device *dev = ds->priv; 1227 int ret = -EOPNOTSUPP; 1228 1229 if ((phy_interface_mode_is_8023z(state->interface) || 1230 state->interface == PHY_INTERFACE_MODE_SGMII) && 1231 dev->ops->serdes_link_state) 1232 ret = dev->ops->serdes_link_state(dev, port, state); 1233 1234 return ret; 1235 } 1236 EXPORT_SYMBOL(b53_phylink_mac_link_state); 1237 1238 void b53_phylink_mac_config(struct dsa_switch *ds, int port, 1239 unsigned int mode, 1240 const struct phylink_link_state *state) 1241 { 1242 struct b53_device *dev = ds->priv; 1243 1244 if (mode == MLO_AN_PHY) 1245 return; 1246 1247 if (mode == MLO_AN_FIXED) { 1248 b53_force_port_config(dev, port, state->speed, 1249 state->duplex, state->pause); 1250 return; 1251 } 1252 1253 if ((phy_interface_mode_is_8023z(state->interface) || 1254 state->interface == PHY_INTERFACE_MODE_SGMII) && 1255 dev->ops->serdes_config) 1256 dev->ops->serdes_config(dev, port, mode, state); 1257 } 1258 EXPORT_SYMBOL(b53_phylink_mac_config); 1259 1260 void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port) 1261 { 1262 struct b53_device *dev = ds->priv; 1263 1264 if (dev->ops->serdes_an_restart) 1265 dev->ops->serdes_an_restart(dev, port); 1266 } 1267 EXPORT_SYMBOL(b53_phylink_mac_an_restart); 1268 1269 void b53_phylink_mac_link_down(struct dsa_switch *ds, int port, 1270 unsigned int mode, 1271 phy_interface_t interface) 1272 { 1273 struct b53_device *dev = ds->priv; 1274 1275 if (mode == MLO_AN_PHY) 1276 return; 1277 1278 if (mode == MLO_AN_FIXED) { 1279 b53_force_link(dev, port, false); 1280 return; 1281 } 1282 1283 if (phy_interface_mode_is_8023z(interface) && 1284 dev->ops->serdes_link_set) 1285 dev->ops->serdes_link_set(dev, port, mode, interface, false); 1286 } 1287 EXPORT_SYMBOL(b53_phylink_mac_link_down); 1288 1289 void b53_phylink_mac_link_up(struct dsa_switch *ds, int port, 1290 unsigned int mode, 1291 phy_interface_t interface, 1292 struct phy_device *phydev) 1293 { 1294 struct b53_device *dev = ds->priv; 1295 1296 if (mode == MLO_AN_PHY) 1297 return; 1298 1299 if (mode == MLO_AN_FIXED) { 
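		/* Fixed-link ports (typically the CPU/IMP port) are driven
		 * through the per-port override register: speed, duplex and
		 * pause were already programmed in b53_phylink_mac_config(),
		 * so only the forced link bit needs to be set here.
		 */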
1300 b53_force_link(dev, port, true); 1301 return; 1302 } 1303 1304 if (phy_interface_mode_is_8023z(interface) && 1305 dev->ops->serdes_link_set) 1306 dev->ops->serdes_link_set(dev, port, mode, interface, true); 1307 } 1308 EXPORT_SYMBOL(b53_phylink_mac_link_up); 1309 1310 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) 1311 { 1312 struct b53_device *dev = ds->priv; 1313 u16 pvid, new_pvid; 1314 1315 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); 1316 new_pvid = pvid; 1317 if (!vlan_filtering) { 1318 /* Filtering is currently enabled, use the default PVID since 1319 * the bridge does not expect tagging anymore 1320 */ 1321 dev->ports[port].pvid = pvid; 1322 new_pvid = b53_default_pvid(dev); 1323 } else { 1324 /* Filtering is currently disabled, restore the previous PVID */ 1325 new_pvid = dev->ports[port].pvid; 1326 } 1327 1328 if (pvid != new_pvid) 1329 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), 1330 new_pvid); 1331 1332 b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering); 1333 1334 return 0; 1335 } 1336 EXPORT_SYMBOL(b53_vlan_filtering); 1337 1338 int b53_vlan_prepare(struct dsa_switch *ds, int port, 1339 const struct switchdev_obj_port_vlan *vlan) 1340 { 1341 struct b53_device *dev = ds->priv; 1342 1343 if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0) 1344 return -EOPNOTSUPP; 1345 1346 if (vlan->vid_end > dev->num_vlans) 1347 return -ERANGE; 1348 1349 b53_enable_vlan(dev, true, ds->vlan_filtering); 1350 1351 return 0; 1352 } 1353 EXPORT_SYMBOL(b53_vlan_prepare); 1354 1355 void b53_vlan_add(struct dsa_switch *ds, int port, 1356 const struct switchdev_obj_port_vlan *vlan) 1357 { 1358 struct b53_device *dev = ds->priv; 1359 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1360 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; 1361 struct b53_vlan *vl; 1362 u16 vid; 1363 1364 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { 1365 vl = &dev->vlans[vid]; 1366 1367 b53_get_vlan_entry(dev, vid, vl); 1368 1369 vl->members |= BIT(port); 1370 if (untagged && !dsa_is_cpu_port(ds, port)) 1371 vl->untag |= BIT(port); 1372 else 1373 vl->untag &= ~BIT(port); 1374 1375 b53_set_vlan_entry(dev, vid, vl); 1376 b53_fast_age_vlan(dev, vid); 1377 } 1378 1379 if (pvid && !dsa_is_cpu_port(ds, port)) { 1380 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), 1381 vlan->vid_end); 1382 b53_fast_age_vlan(dev, vid); 1383 } 1384 } 1385 EXPORT_SYMBOL(b53_vlan_add); 1386 1387 int b53_vlan_del(struct dsa_switch *ds, int port, 1388 const struct switchdev_obj_port_vlan *vlan) 1389 { 1390 struct b53_device *dev = ds->priv; 1391 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1392 struct b53_vlan *vl; 1393 u16 vid; 1394 u16 pvid; 1395 1396 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); 1397 1398 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { 1399 vl = &dev->vlans[vid]; 1400 1401 b53_get_vlan_entry(dev, vid, vl); 1402 1403 vl->members &= ~BIT(port); 1404 1405 if (pvid == vid) 1406 pvid = b53_default_pvid(dev); 1407 1408 if (untagged && !dsa_is_cpu_port(ds, port)) 1409 vl->untag &= ~(BIT(port)); 1410 1411 b53_set_vlan_entry(dev, vid, vl); 1412 b53_fast_age_vlan(dev, vid); 1413 } 1414 1415 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid); 1416 b53_fast_age_vlan(dev, pvid); 1417 1418 return 0; 1419 } 1420 EXPORT_SYMBOL(b53_vlan_del); 1421 1422 /* Address Resolution Logic routines */ 1423 static int b53_arl_op_wait(struct b53_device *dev) 1424 { 1425 unsigned int 
timeout = 10; 1426 u8 reg; 1427 1428 do { 1429 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); 1430 if (!(reg & ARLTBL_START_DONE)) 1431 return 0; 1432 1433 usleep_range(1000, 2000); 1434 } while (timeout--); 1435 1436 dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg); 1437 1438 return -ETIMEDOUT; 1439 } 1440 1441 static int b53_arl_rw_op(struct b53_device *dev, unsigned int op) 1442 { 1443 u8 reg; 1444 1445 if (op > ARLTBL_RW) 1446 return -EINVAL; 1447 1448 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); 1449 reg |= ARLTBL_START_DONE; 1450 if (op) 1451 reg |= ARLTBL_RW; 1452 else 1453 reg &= ~ARLTBL_RW; 1454 b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg); 1455 1456 return b53_arl_op_wait(dev); 1457 } 1458 1459 static int b53_arl_read(struct b53_device *dev, u64 mac, 1460 u16 vid, struct b53_arl_entry *ent, u8 *idx, 1461 bool is_valid) 1462 { 1463 unsigned int i; 1464 int ret; 1465 1466 ret = b53_arl_op_wait(dev); 1467 if (ret) 1468 return ret; 1469 1470 /* Read the bins */ 1471 for (i = 0; i < dev->num_arl_entries; i++) { 1472 u64 mac_vid; 1473 u32 fwd_entry; 1474 1475 b53_read64(dev, B53_ARLIO_PAGE, 1476 B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid); 1477 b53_read32(dev, B53_ARLIO_PAGE, 1478 B53_ARLTBL_DATA_ENTRY(i), &fwd_entry); 1479 b53_arl_to_entry(ent, mac_vid, fwd_entry); 1480 1481 if (!(fwd_entry & ARLTBL_VALID)) 1482 continue; 1483 if ((mac_vid & ARLTBL_MAC_MASK) != mac) 1484 continue; 1485 *idx = i; 1486 } 1487 1488 return -ENOENT; 1489 } 1490 1491 static int b53_arl_op(struct b53_device *dev, int op, int port, 1492 const unsigned char *addr, u16 vid, bool is_valid) 1493 { 1494 struct b53_arl_entry ent; 1495 u32 fwd_entry; 1496 u64 mac, mac_vid = 0; 1497 u8 idx = 0; 1498 int ret; 1499 1500 /* Convert the array into a 64-bit MAC */ 1501 mac = ether_addr_to_u64(addr); 1502 1503 /* Perform a read for the given MAC and VID */ 1504 b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac); 1505 b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); 1506 1507 /* Issue a read operation for this MAC */ 1508 ret = b53_arl_rw_op(dev, 1); 1509 if (ret) 1510 return ret; 1511 1512 ret = b53_arl_read(dev, mac, vid, &ent, &idx, is_valid); 1513 /* If this is a read, just finish now */ 1514 if (op) 1515 return ret; 1516 1517 /* We could not find a matching MAC, so reset to a new entry */ 1518 if (ret) { 1519 fwd_entry = 0; 1520 idx = 1; 1521 } 1522 1523 /* For multicast address, the port is a bitmask and the validity 1524 * is determined by having at least one port being still active 1525 */ 1526 if (!is_multicast_ether_addr(addr)) { 1527 ent.port = port; 1528 ent.is_valid = is_valid; 1529 } else { 1530 if (is_valid) 1531 ent.port |= BIT(port); 1532 else 1533 ent.port &= ~BIT(port); 1534 1535 ent.is_valid = !!(ent.port); 1536 } 1537 1538 ent.is_valid = is_valid; 1539 ent.vid = vid; 1540 ent.is_static = true; 1541 ent.is_age = false; 1542 memcpy(ent.mac, addr, ETH_ALEN); 1543 b53_arl_from_entry(&mac_vid, &fwd_entry, &ent); 1544 1545 b53_write64(dev, B53_ARLIO_PAGE, 1546 B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid); 1547 b53_write32(dev, B53_ARLIO_PAGE, 1548 B53_ARLTBL_DATA_ENTRY(idx), fwd_entry); 1549 1550 return b53_arl_rw_op(dev, 0); 1551 } 1552 1553 int b53_fdb_add(struct dsa_switch *ds, int port, 1554 const unsigned char *addr, u16 vid) 1555 { 1556 struct b53_device *priv = ds->priv; 1557 1558 /* 5325 and 5365 require some more massaging, but could 1559 * be supported eventually 1560 */ 1561 if (is5325(priv) || is5365(priv)) 1562 return -EOPNOTSUPP; 1563 1564 
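	/* op == 0 asks b53_arl_op() to write the entry back after looking up
	 * the matching ARL bin; is_valid == true installs it as a valid
	 * static entry for this port and VID.
	 */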
return b53_arl_op(priv, 0, port, addr, vid, true); 1565 } 1566 EXPORT_SYMBOL(b53_fdb_add); 1567 1568 int b53_fdb_del(struct dsa_switch *ds, int port, 1569 const unsigned char *addr, u16 vid) 1570 { 1571 struct b53_device *priv = ds->priv; 1572 1573 return b53_arl_op(priv, 0, port, addr, vid, false); 1574 } 1575 EXPORT_SYMBOL(b53_fdb_del); 1576 1577 static int b53_arl_search_wait(struct b53_device *dev) 1578 { 1579 unsigned int timeout = 1000; 1580 u8 reg; 1581 1582 do { 1583 b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, ®); 1584 if (!(reg & ARL_SRCH_STDN)) 1585 return 0; 1586 1587 if (reg & ARL_SRCH_VLID) 1588 return 0; 1589 1590 usleep_range(1000, 2000); 1591 } while (timeout--); 1592 1593 return -ETIMEDOUT; 1594 } 1595 1596 static void b53_arl_search_rd(struct b53_device *dev, u8 idx, 1597 struct b53_arl_entry *ent) 1598 { 1599 u64 mac_vid; 1600 u32 fwd_entry; 1601 1602 b53_read64(dev, B53_ARLIO_PAGE, 1603 B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid); 1604 b53_read32(dev, B53_ARLIO_PAGE, 1605 B53_ARL_SRCH_RSTL(idx), &fwd_entry); 1606 b53_arl_to_entry(ent, mac_vid, fwd_entry); 1607 } 1608 1609 static int b53_fdb_copy(int port, const struct b53_arl_entry *ent, 1610 dsa_fdb_dump_cb_t *cb, void *data) 1611 { 1612 if (!ent->is_valid) 1613 return 0; 1614 1615 if (port != ent->port) 1616 return 0; 1617 1618 return cb(ent->mac, ent->vid, ent->is_static, data); 1619 } 1620 1621 int b53_fdb_dump(struct dsa_switch *ds, int port, 1622 dsa_fdb_dump_cb_t *cb, void *data) 1623 { 1624 struct b53_device *priv = ds->priv; 1625 struct b53_arl_entry results[2]; 1626 unsigned int count = 0; 1627 int ret; 1628 u8 reg; 1629 1630 /* Start search operation */ 1631 reg = ARL_SRCH_STDN; 1632 b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg); 1633 1634 do { 1635 ret = b53_arl_search_wait(priv); 1636 if (ret) 1637 return ret; 1638 1639 b53_arl_search_rd(priv, 0, &results[0]); 1640 ret = b53_fdb_copy(port, &results[0], cb, data); 1641 if (ret) 1642 return ret; 1643 1644 if (priv->num_arl_entries > 2) { 1645 b53_arl_search_rd(priv, 1, &results[1]); 1646 ret = b53_fdb_copy(port, &results[1], cb, data); 1647 if (ret) 1648 return ret; 1649 1650 if (!results[0].is_valid && !results[1].is_valid) 1651 break; 1652 } 1653 1654 } while (count++ < 1024); 1655 1656 return 0; 1657 } 1658 EXPORT_SYMBOL(b53_fdb_dump); 1659 1660 int b53_mdb_prepare(struct dsa_switch *ds, int port, 1661 const struct switchdev_obj_port_mdb *mdb) 1662 { 1663 struct b53_device *priv = ds->priv; 1664 1665 /* 5325 and 5365 require some more massaging, but could 1666 * be supported eventually 1667 */ 1668 if (is5325(priv) || is5365(priv)) 1669 return -EOPNOTSUPP; 1670 1671 return 0; 1672 } 1673 EXPORT_SYMBOL(b53_mdb_prepare); 1674 1675 void b53_mdb_add(struct dsa_switch *ds, int port, 1676 const struct switchdev_obj_port_mdb *mdb) 1677 { 1678 struct b53_device *priv = ds->priv; 1679 int ret; 1680 1681 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true); 1682 if (ret) 1683 dev_err(ds->dev, "failed to add MDB entry\n"); 1684 } 1685 EXPORT_SYMBOL(b53_mdb_add); 1686 1687 int b53_mdb_del(struct dsa_switch *ds, int port, 1688 const struct switchdev_obj_port_mdb *mdb) 1689 { 1690 struct b53_device *priv = ds->priv; 1691 int ret; 1692 1693 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false); 1694 if (ret) 1695 dev_err(ds->dev, "failed to delete MDB entry\n"); 1696 1697 return ret; 1698 } 1699 EXPORT_SYMBOL(b53_mdb_del); 1700 1701 int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br) 1702 { 1703 struct b53_device *dev = 
ds->priv; 1704 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 1705 u16 pvlan, reg; 1706 unsigned int i; 1707 1708 /* Make this port leave the all VLANs join since we will have proper 1709 * VLAN entries from now on 1710 */ 1711 if (is58xx(dev)) { 1712 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, ®); 1713 reg &= ~BIT(port); 1714 if ((reg & BIT(cpu_port)) == BIT(cpu_port)) 1715 reg &= ~BIT(cpu_port); 1716 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); 1717 } 1718 1719 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 1720 1721 b53_for_each_port(dev, i) { 1722 if (dsa_to_port(ds, i)->bridge_dev != br) 1723 continue; 1724 1725 /* Add this local port to the remote port VLAN control 1726 * membership and update the remote port bitmask 1727 */ 1728 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); 1729 reg |= BIT(port); 1730 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); 1731 dev->ports[i].vlan_ctl_mask = reg; 1732 1733 pvlan |= BIT(i); 1734 } 1735 1736 /* Configure the local port VLAN control membership to include 1737 * remote ports and update the local port bitmask 1738 */ 1739 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 1740 dev->ports[port].vlan_ctl_mask = pvlan; 1741 1742 return 0; 1743 } 1744 EXPORT_SYMBOL(b53_br_join); 1745 1746 void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) 1747 { 1748 struct b53_device *dev = ds->priv; 1749 struct b53_vlan *vl = &dev->vlans[0]; 1750 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 1751 unsigned int i; 1752 u16 pvlan, reg, pvid; 1753 1754 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 1755 1756 b53_for_each_port(dev, i) { 1757 /* Don't touch the remaining ports */ 1758 if (dsa_to_port(ds, i)->bridge_dev != br) 1759 continue; 1760 1761 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); 1762 reg &= ~BIT(port); 1763 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); 1764 dev->ports[port].vlan_ctl_mask = reg; 1765 1766 /* Prevent self removal to preserve isolation */ 1767 if (port != i) 1768 pvlan &= ~BIT(i); 1769 } 1770 1771 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 1772 dev->ports[port].vlan_ctl_mask = pvlan; 1773 1774 pvid = b53_default_pvid(dev); 1775 1776 /* Make this port join all VLANs without VLAN entries */ 1777 if (is58xx(dev)) { 1778 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, ®); 1779 reg |= BIT(port); 1780 if (!(reg & BIT(cpu_port))) 1781 reg |= BIT(cpu_port); 1782 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); 1783 } else { 1784 b53_get_vlan_entry(dev, pvid, vl); 1785 vl->members |= BIT(port) | BIT(cpu_port); 1786 vl->untag |= BIT(port) | BIT(cpu_port); 1787 b53_set_vlan_entry(dev, pvid, vl); 1788 } 1789 } 1790 EXPORT_SYMBOL(b53_br_leave); 1791 1792 void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state) 1793 { 1794 struct b53_device *dev = ds->priv; 1795 u8 hw_state; 1796 u8 reg; 1797 1798 switch (state) { 1799 case BR_STATE_DISABLED: 1800 hw_state = PORT_CTRL_DIS_STATE; 1801 break; 1802 case BR_STATE_LISTENING: 1803 hw_state = PORT_CTRL_LISTEN_STATE; 1804 break; 1805 case BR_STATE_LEARNING: 1806 hw_state = PORT_CTRL_LEARN_STATE; 1807 break; 1808 case BR_STATE_FORWARDING: 1809 hw_state = PORT_CTRL_FWD_STATE; 1810 break; 1811 case BR_STATE_BLOCKING: 1812 hw_state = PORT_CTRL_BLOCK_STATE; 1813 break; 1814 default: 1815 dev_err(ds->dev, "invalid STP state: %d\n", state); 1816 return; 1817 } 1818 1819 b53_read8(dev, 
B53_CTRL_PAGE, B53_PORT_CTRL(port), ®); 1820 reg &= ~PORT_CTRL_STP_STATE_MASK; 1821 reg |= hw_state; 1822 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); 1823 } 1824 EXPORT_SYMBOL(b53_br_set_stp_state); 1825 1826 void b53_br_fast_age(struct dsa_switch *ds, int port) 1827 { 1828 struct b53_device *dev = ds->priv; 1829 1830 if (b53_fast_age_port(dev, port)) 1831 dev_err(ds->dev, "fast ageing failed\n"); 1832 } 1833 EXPORT_SYMBOL(b53_br_fast_age); 1834 1835 int b53_br_egress_floods(struct dsa_switch *ds, int port, 1836 bool unicast, bool multicast) 1837 { 1838 struct b53_device *dev = ds->priv; 1839 u16 uc, mc; 1840 1841 b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc); 1842 if (unicast) 1843 uc |= BIT(port); 1844 else 1845 uc &= ~BIT(port); 1846 b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc); 1847 1848 b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc); 1849 if (multicast) 1850 mc |= BIT(port); 1851 else 1852 mc &= ~BIT(port); 1853 b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc); 1854 1855 b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc); 1856 if (multicast) 1857 mc |= BIT(port); 1858 else 1859 mc &= ~BIT(port); 1860 b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc); 1861 1862 return 0; 1863 1864 } 1865 EXPORT_SYMBOL(b53_br_egress_floods); 1866 1867 static bool b53_possible_cpu_port(struct dsa_switch *ds, int port) 1868 { 1869 /* Broadcom switches will accept enabling Broadcom tags on the 1870 * following ports: 5, 7 and 8, any other port is not supported 1871 */ 1872 switch (port) { 1873 case B53_CPU_PORT_25: 1874 case 7: 1875 case B53_CPU_PORT: 1876 return true; 1877 } 1878 1879 return false; 1880 } 1881 1882 static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port, 1883 enum dsa_tag_protocol tag_protocol) 1884 { 1885 bool ret = b53_possible_cpu_port(ds, port); 1886 1887 if (!ret) { 1888 dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n", 1889 port); 1890 return ret; 1891 } 1892 1893 switch (tag_protocol) { 1894 case DSA_TAG_PROTO_BRCM: 1895 case DSA_TAG_PROTO_BRCM_PREPEND: 1896 dev_warn(ds->dev, 1897 "Port %d is stacked to Broadcom tag switch\n", port); 1898 ret = false; 1899 break; 1900 default: 1901 ret = true; 1902 break; 1903 } 1904 1905 return ret; 1906 } 1907 1908 enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port, 1909 enum dsa_tag_protocol mprot) 1910 { 1911 struct b53_device *dev = ds->priv; 1912 1913 /* Older models (5325, 5365) support a different tag format that we do 1914 * not support in net/dsa/tag_brcm.c yet. 
1915 */ 1916 if (is5325(dev) || is5365(dev) || 1917 !b53_can_enable_brcm_tags(ds, port, mprot)) { 1918 dev->tag_protocol = DSA_TAG_PROTO_NONE; 1919 goto out; 1920 } 1921 1922 /* Broadcom BCM58xx chips have a flow accelerator on Port 8 1923 * which requires us to use the prepended Broadcom tag type 1924 */ 1925 if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) { 1926 dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND; 1927 goto out; 1928 } 1929 1930 dev->tag_protocol = DSA_TAG_PROTO_BRCM; 1931 out: 1932 return dev->tag_protocol; 1933 } 1934 EXPORT_SYMBOL(b53_get_tag_protocol); 1935 1936 int b53_mirror_add(struct dsa_switch *ds, int port, 1937 struct dsa_mall_mirror_tc_entry *mirror, bool ingress) 1938 { 1939 struct b53_device *dev = ds->priv; 1940 u16 reg, loc; 1941 1942 if (ingress) 1943 loc = B53_IG_MIR_CTL; 1944 else 1945 loc = B53_EG_MIR_CTL; 1946 1947 b53_read16(dev, B53_MGMT_PAGE, loc, ®); 1948 reg |= BIT(port); 1949 b53_write16(dev, B53_MGMT_PAGE, loc, reg); 1950 1951 b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, ®); 1952 reg &= ~CAP_PORT_MASK; 1953 reg |= mirror->to_local_port; 1954 reg |= MIRROR_EN; 1955 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg); 1956 1957 return 0; 1958 } 1959 EXPORT_SYMBOL(b53_mirror_add); 1960 1961 void b53_mirror_del(struct dsa_switch *ds, int port, 1962 struct dsa_mall_mirror_tc_entry *mirror) 1963 { 1964 struct b53_device *dev = ds->priv; 1965 bool loc_disable = false, other_loc_disable = false; 1966 u16 reg, loc; 1967 1968 if (mirror->ingress) 1969 loc = B53_IG_MIR_CTL; 1970 else 1971 loc = B53_EG_MIR_CTL; 1972 1973 /* Update the desired ingress/egress register */ 1974 b53_read16(dev, B53_MGMT_PAGE, loc, ®); 1975 reg &= ~BIT(port); 1976 if (!(reg & MIRROR_MASK)) 1977 loc_disable = true; 1978 b53_write16(dev, B53_MGMT_PAGE, loc, reg); 1979 1980 /* Now look at the other one to know if we can disable mirroring 1981 * entirely 1982 */ 1983 if (mirror->ingress) 1984 b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, ®); 1985 else 1986 b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, ®); 1987 if (!(reg & MIRROR_MASK)) 1988 other_loc_disable = true; 1989 1990 b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, ®); 1991 /* Both no longer have ports, let's disable mirroring */ 1992 if (loc_disable && other_loc_disable) { 1993 reg &= ~MIRROR_EN; 1994 reg &= ~mirror->to_local_port; 1995 } 1996 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg); 1997 } 1998 EXPORT_SYMBOL(b53_mirror_del); 1999 2000 void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable) 2001 { 2002 struct b53_device *dev = ds->priv; 2003 u16 reg; 2004 2005 b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, ®); 2006 if (enable) 2007 reg |= BIT(port); 2008 else 2009 reg &= ~BIT(port); 2010 b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg); 2011 } 2012 EXPORT_SYMBOL(b53_eee_enable_set); 2013 2014 2015 /* Returns 0 if EEE was not enabled, or 1 otherwise 2016 */ 2017 int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy) 2018 { 2019 int ret; 2020 2021 ret = phy_init_eee(phy, 0); 2022 if (ret) 2023 return 0; 2024 2025 b53_eee_enable_set(ds, port, true); 2026 2027 return 1; 2028 } 2029 EXPORT_SYMBOL(b53_eee_init); 2030 2031 int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) 2032 { 2033 struct b53_device *dev = ds->priv; 2034 struct ethtool_eee *p = &dev->ports[port].eee; 2035 u16 reg; 2036 2037 if (is5325(dev) || is5365(dev)) 2038 return -EOPNOTSUPP; 2039 2040 b53_read16(dev, B53_EEE_PAGE, B53_EEE_LPI_INDICATE, ®); 2041 e->eee_enabled = 
int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	struct b53_device *dev = ds->priv;
	struct ethtool_eee *p = &dev->ports[port].eee;
	u16 reg;

	if (is5325(dev) || is5365(dev))
		return -EOPNOTSUPP;

	b53_read16(dev, B53_EEE_PAGE, B53_EEE_LPI_INDICATE, &reg);
	e->eee_enabled = p->eee_enabled;
	e->eee_active = !!(reg & BIT(port));

	return 0;
}
EXPORT_SYMBOL(b53_get_mac_eee);

int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	struct b53_device *dev = ds->priv;
	struct ethtool_eee *p = &dev->ports[port].eee;

	if (is5325(dev) || is5365(dev))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;
	b53_eee_enable_set(ds, port, e->eee_enabled);

	return 0;
}
EXPORT_SYMBOL(b53_set_mac_eee);

static const struct dsa_switch_ops b53_switch_ops = {
	.get_tag_protocol = b53_get_tag_protocol,
	.setup = b53_setup,
	.get_strings = b53_get_strings,
	.get_ethtool_stats = b53_get_ethtool_stats,
	.get_sset_count = b53_get_sset_count,
	.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
	.phy_read = b53_phy_read16,
	.phy_write = b53_phy_write16,
	.adjust_link = b53_adjust_link,
	.phylink_validate = b53_phylink_validate,
	.phylink_mac_link_state = b53_phylink_mac_link_state,
	.phylink_mac_config = b53_phylink_mac_config,
	.phylink_mac_an_restart = b53_phylink_mac_an_restart,
	.phylink_mac_link_down = b53_phylink_mac_link_down,
	.phylink_mac_link_up = b53_phylink_mac_link_up,
	.port_enable = b53_enable_port,
	.port_disable = b53_disable_port,
	.get_mac_eee = b53_get_mac_eee,
	.set_mac_eee = b53_set_mac_eee,
	.port_bridge_join = b53_br_join,
	.port_bridge_leave = b53_br_leave,
	.port_stp_state_set = b53_br_set_stp_state,
	.port_fast_age = b53_br_fast_age,
	.port_egress_floods = b53_br_egress_floods,
	.port_vlan_filtering = b53_vlan_filtering,
	.port_vlan_prepare = b53_vlan_prepare,
	.port_vlan_add = b53_vlan_add,
	.port_vlan_del = b53_vlan_del,
	.port_fdb_dump = b53_fdb_dump,
	.port_fdb_add = b53_fdb_add,
	.port_fdb_del = b53_fdb_del,
	.port_mirror_add = b53_mirror_add,
	.port_mirror_del = b53_mirror_del,
	.port_mdb_prepare = b53_mdb_prepare,
	.port_mdb_add = b53_mdb_add,
	.port_mdb_del = b53_mdb_del,
};

struct b53_chip_data {
	u32 chip_id;
	const char *dev_name;
	u16 vlans;
	u16 enabled_ports;
	u8 cpu_port;
	u8 vta_regs[3];
	u8 arl_entries;
	u8 duplex_reg;
	u8 jumbo_pm_reg;
	u8 jumbo_size_reg;
};

#define B53_VTA_REGS \
	{ B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY }
#define B53_VTA_REGS_9798 \
	{ B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 }
#define B53_VTA_REGS_63XX \
	{ B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX }

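/* Per-chip defaults. b53_switch_init() copies the entry matching
 * dev->chip_id into the b53_device; enabled_ports may still be overridden
 * by platform data (see b53_switch_register()).
 */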
"BCM5395", 2156 .vlans = 4096, 2157 .enabled_ports = 0x1f, 2158 .arl_entries = 4, 2159 .cpu_port = B53_CPU_PORT, 2160 .vta_regs = B53_VTA_REGS, 2161 .duplex_reg = B53_DUPLEX_STAT_GE, 2162 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2163 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2164 }, 2165 { 2166 .chip_id = BCM5397_DEVICE_ID, 2167 .dev_name = "BCM5397", 2168 .vlans = 4096, 2169 .enabled_ports = 0x1f, 2170 .arl_entries = 4, 2171 .cpu_port = B53_CPU_PORT, 2172 .vta_regs = B53_VTA_REGS_9798, 2173 .duplex_reg = B53_DUPLEX_STAT_GE, 2174 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2175 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2176 }, 2177 { 2178 .chip_id = BCM5398_DEVICE_ID, 2179 .dev_name = "BCM5398", 2180 .vlans = 4096, 2181 .enabled_ports = 0x7f, 2182 .arl_entries = 4, 2183 .cpu_port = B53_CPU_PORT, 2184 .vta_regs = B53_VTA_REGS_9798, 2185 .duplex_reg = B53_DUPLEX_STAT_GE, 2186 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2187 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2188 }, 2189 { 2190 .chip_id = BCM53115_DEVICE_ID, 2191 .dev_name = "BCM53115", 2192 .vlans = 4096, 2193 .enabled_ports = 0x1f, 2194 .arl_entries = 4, 2195 .vta_regs = B53_VTA_REGS, 2196 .cpu_port = B53_CPU_PORT, 2197 .duplex_reg = B53_DUPLEX_STAT_GE, 2198 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2199 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2200 }, 2201 { 2202 .chip_id = BCM53125_DEVICE_ID, 2203 .dev_name = "BCM53125", 2204 .vlans = 4096, 2205 .enabled_ports = 0xff, 2206 .arl_entries = 4, 2207 .cpu_port = B53_CPU_PORT, 2208 .vta_regs = B53_VTA_REGS, 2209 .duplex_reg = B53_DUPLEX_STAT_GE, 2210 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2211 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2212 }, 2213 { 2214 .chip_id = BCM53128_DEVICE_ID, 2215 .dev_name = "BCM53128", 2216 .vlans = 4096, 2217 .enabled_ports = 0x1ff, 2218 .arl_entries = 4, 2219 .cpu_port = B53_CPU_PORT, 2220 .vta_regs = B53_VTA_REGS, 2221 .duplex_reg = B53_DUPLEX_STAT_GE, 2222 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2223 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2224 }, 2225 { 2226 .chip_id = BCM63XX_DEVICE_ID, 2227 .dev_name = "BCM63xx", 2228 .vlans = 4096, 2229 .enabled_ports = 0, /* pdata must provide them */ 2230 .arl_entries = 4, 2231 .cpu_port = B53_CPU_PORT, 2232 .vta_regs = B53_VTA_REGS_63XX, 2233 .duplex_reg = B53_DUPLEX_STAT_63XX, 2234 .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, 2235 .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX, 2236 }, 2237 { 2238 .chip_id = BCM53010_DEVICE_ID, 2239 .dev_name = "BCM53010", 2240 .vlans = 4096, 2241 .enabled_ports = 0x1f, 2242 .arl_entries = 4, 2243 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ 2244 .vta_regs = B53_VTA_REGS, 2245 .duplex_reg = B53_DUPLEX_STAT_GE, 2246 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2247 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2248 }, 2249 { 2250 .chip_id = BCM53011_DEVICE_ID, 2251 .dev_name = "BCM53011", 2252 .vlans = 4096, 2253 .enabled_ports = 0x1bf, 2254 .arl_entries = 4, 2255 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ 2256 .vta_regs = B53_VTA_REGS, 2257 .duplex_reg = B53_DUPLEX_STAT_GE, 2258 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2259 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2260 }, 2261 { 2262 .chip_id = BCM53012_DEVICE_ID, 2263 .dev_name = "BCM53012", 2264 .vlans = 4096, 2265 .enabled_ports = 0x1bf, 2266 .arl_entries = 4, 2267 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ 2268 .vta_regs = B53_VTA_REGS, 2269 .duplex_reg = B53_DUPLEX_STAT_GE, 2270 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2271 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2272 }, 2273 { 2274 .chip_id = BCM53018_DEVICE_ID, 2275 .dev_name = "BCM53018", 2276 .vlans = 4096, 2277 .enabled_ports = 
	{
		.chip_id = BCM53018_DEVICE_ID,
		.dev_name = "BCM53018",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53019_DEVICE_ID,
		.dev_name = "BCM53019",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM58XX_DEVICE_ID,
		.dev_name = "BCM585xx/586xx/88312",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM583XX_DEVICE_ID,
		.dev_name = "BCM583xx/11360",
		.vlans = 4096,
		.enabled_ports = 0x103,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM7445_DEVICE_ID,
		.dev_name = "BCM7445",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM7278_DEVICE_ID,
		.dev_name = "BCM7278",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
};

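/* Apply the per-chip defaults from b53_switch_chips[], refine them from
 * what the hardware reports (BCM5325 variant detection, BCM53115 GMII
 * strap), then allocate per-port and per-VLAN state and claim the optional
 * reset GPIO.
 */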
static int b53_switch_init(struct b53_device *dev)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
		const struct b53_chip_data *chip = &b53_switch_chips[i];

		if (chip->chip_id == dev->chip_id) {
			if (!dev->enabled_ports)
				dev->enabled_ports = chip->enabled_ports;
			dev->name = chip->dev_name;
			dev->duplex_reg = chip->duplex_reg;
			dev->vta_regs[0] = chip->vta_regs[0];
			dev->vta_regs[1] = chip->vta_regs[1];
			dev->vta_regs[2] = chip->vta_regs[2];
			dev->jumbo_pm_reg = chip->jumbo_pm_reg;
			dev->cpu_port = chip->cpu_port;
			dev->num_vlans = chip->vlans;
			dev->num_arl_entries = chip->arl_entries;
			break;
		}
	}

	/* check which BCM5325x version we have */
	if (is5325(dev)) {
		u8 vc4;

		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);

		/* check reserved bits */
		switch (vc4 & 3) {
		case 1:
			/* BCM5325E */
			break;
		case 3:
			/* BCM5325F - do not use port 4 */
			dev->enabled_ports &= ~BIT(4);
			break;
		default:
			/* On the BCM47XX SoCs this is the supported internal
			 * switch.
			 */
#ifndef CONFIG_BCM47XX
			/* BCM5325M */
			return -EINVAL;
#else
			break;
#endif
		}
	} else if (dev->chip_id == BCM53115_DEVICE_ID) {
		u64 strap_value;

		b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value);
		/* use second IMP port if GMII is enabled */
		if (strap_value & SV_GMII_CTRL_115)
			dev->cpu_port = 5;
	}

	/* cpu port is always last */
	dev->num_ports = dev->cpu_port + 1;
	dev->enabled_ports |= BIT(dev->cpu_port);

	/* Include non standard CPU port built-in PHYs to be probed */
	if (is539x(dev) || is531x5(dev)) {
		for (i = 0; i < dev->num_ports; i++) {
			if (!(dev->ds->phys_mii_mask & BIT(i)) &&
			    !b53_possible_cpu_port(dev->ds, i))
				dev->ds->phys_mii_mask |= BIT(i);
		}
	}

	dev->ports = devm_kcalloc(dev->dev,
				  dev->num_ports, sizeof(struct b53_port),
				  GFP_KERNEL);
	if (!dev->ports)
		return -ENOMEM;

	dev->vlans = devm_kcalloc(dev->dev,
				  dev->num_vlans, sizeof(struct b53_vlan),
				  GFP_KERNEL);
	if (!dev->vlans)
		return -ENOMEM;

	dev->reset_gpio = b53_switch_get_reset_gpio(dev);
	if (dev->reset_gpio >= 0) {
		ret = devm_gpio_request_one(dev->dev, dev->reset_gpio,
					    GPIOF_OUT_INIT_HIGH, "robo_reset");
		if (ret)
			return ret;
	}

	return 0;
}

struct b53_device *b53_switch_alloc(struct device *base,
				    const struct b53_io_ops *ops,
				    void *priv)
{
	struct dsa_switch *ds;
	struct b53_device *dev;

	ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	ds->dev = base;
	ds->num_ports = DSA_MAX_PORTS;

	dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	ds->priv = dev;
	dev->dev = base;

	dev->ds = ds;
	dev->priv = priv;
	dev->ops = ops;
	ds->ops = &b53_switch_ops;
	mutex_init(&dev->reg_mutex);
	mutex_init(&dev->stats_mutex);

	return dev;
}
EXPORT_SYMBOL(b53_switch_alloc);

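/* Probe the device ID registers to identify the switch model. BCM5325 and
 * BCM5365 predate the ID register and are told apart by checking whether
 * the 5325 VLAN table access register accepts writes.
 */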
int b53_switch_detect(struct b53_device *dev)
{
	u32 id32;
	u16 tmp;
	u8 id8;
	int ret;

	ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8);
	if (ret)
		return ret;

	switch (id8) {
	case 0:
		/* BCM5325 and BCM5365 do not have this register so reads
		 * return 0. But the read operation did succeed, so assume this
		 * is one of them.
		 *
		 * Next check if we can write to the 5325's VTA register; for
		 * 5365 it is read only.
		 */
		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
		b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);

		if (tmp == 0xf)
			dev->chip_id = BCM5325_DEVICE_ID;
		else
			dev->chip_id = BCM5365_DEVICE_ID;
		break;
	case BCM5389_DEVICE_ID:
	case BCM5395_DEVICE_ID:
	case BCM5397_DEVICE_ID:
	case BCM5398_DEVICE_ID:
		dev->chip_id = id8;
		break;
	default:
		ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32);
		if (ret)
			return ret;

		switch (id32) {
		case BCM53115_DEVICE_ID:
		case BCM53125_DEVICE_ID:
		case BCM53128_DEVICE_ID:
		case BCM53010_DEVICE_ID:
		case BCM53011_DEVICE_ID:
		case BCM53012_DEVICE_ID:
		case BCM53018_DEVICE_ID:
		case BCM53019_DEVICE_ID:
			dev->chip_id = id32;
			break;
		default:
			pr_err("unsupported switch detected (BCM53%02x/BCM%x)\n",
			       id8, id32);
			return -ENODEV;
		}
	}

	if (dev->chip_id == BCM5325_DEVICE_ID)
		return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25,
				 &dev->core_rev);
	else
		return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID,
				 &dev->core_rev);
}
EXPORT_SYMBOL(b53_switch_detect);

int b53_switch_register(struct b53_device *dev)
{
	int ret;

	if (dev->pdata) {
		dev->chip_id = dev->pdata->chip_id;
		dev->enabled_ports = dev->pdata->enabled_ports;
	}

	if (!dev->chip_id && b53_switch_detect(dev))
		return -EINVAL;

	ret = b53_switch_init(dev);
	if (ret)
		return ret;

	pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev);

	return dsa_register_switch(dev->ds);
}
EXPORT_SYMBOL(b53_switch_register);

MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
MODULE_DESCRIPTION("B53 switch library");
MODULE_LICENSE("Dual BSD/GPL");
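
/* Illustrative sketch only: the exported helpers above are intended to be
 * driven by a thin bus front-end (MDIO, SPI, SRAB, MMIO). "my_io_ops" and
 * "bus_priv" below stand in for whatever register accessors and private
 * state that front-end provides; dev->pdata may optionally be filled in
 * before registration, otherwise the chip ID is autodetected.
 *
 *	dev = b53_switch_alloc(&pdev->dev, &my_io_ops, bus_priv);
 *	if (!dev)
 *		return -ENOMEM;
 *	return b53_switch_register(dev);
 */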