1 /* 2 * B53 switch driver main logic 3 * 4 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org> 5 * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com> 6 * 7 * Permission to use, copy, modify, and/or distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 21 22 #include <linux/delay.h> 23 #include <linux/export.h> 24 #include <linux/gpio.h> 25 #include <linux/kernel.h> 26 #include <linux/module.h> 27 #include <linux/platform_data/b53.h> 28 #include <linux/phy.h> 29 #include <linux/etherdevice.h> 30 #include <linux/if_bridge.h> 31 #include <net/dsa.h> 32 #include <net/switchdev.h> 33 34 #include "b53_regs.h" 35 #include "b53_priv.h" 36 37 struct b53_mib_desc { 38 u8 size; 39 u8 offset; 40 const char *name; 41 }; 42 43 /* BCM5365 MIB counters */ 44 static const struct b53_mib_desc b53_mibs_65[] = { 45 { 8, 0x00, "TxOctets" }, 46 { 4, 0x08, "TxDropPkts" }, 47 { 4, 0x10, "TxBroadcastPkts" }, 48 { 4, 0x14, "TxMulticastPkts" }, 49 { 4, 0x18, "TxUnicastPkts" }, 50 { 4, 0x1c, "TxCollisions" }, 51 { 4, 0x20, "TxSingleCollision" }, 52 { 4, 0x24, "TxMultipleCollision" }, 53 { 4, 0x28, "TxDeferredTransmit" }, 54 { 4, 0x2c, "TxLateCollision" }, 55 { 4, 0x30, "TxExcessiveCollision" }, 56 { 4, 0x38, "TxPausePkts" }, 57 { 8, 0x44, "RxOctets" }, 58 { 4, 0x4c, "RxUndersizePkts" 
}, 59 { 4, 0x50, "RxPausePkts" }, 60 { 4, 0x54, "Pkts64Octets" }, 61 { 4, 0x58, "Pkts65to127Octets" }, 62 { 4, 0x5c, "Pkts128to255Octets" }, 63 { 4, 0x60, "Pkts256to511Octets" }, 64 { 4, 0x64, "Pkts512to1023Octets" }, 65 { 4, 0x68, "Pkts1024to1522Octets" }, 66 { 4, 0x6c, "RxOversizePkts" }, 67 { 4, 0x70, "RxJabbers" }, 68 { 4, 0x74, "RxAlignmentErrors" }, 69 { 4, 0x78, "RxFCSErrors" }, 70 { 8, 0x7c, "RxGoodOctets" }, 71 { 4, 0x84, "RxDropPkts" }, 72 { 4, 0x88, "RxUnicastPkts" }, 73 { 4, 0x8c, "RxMulticastPkts" }, 74 { 4, 0x90, "RxBroadcastPkts" }, 75 { 4, 0x94, "RxSAChanges" }, 76 { 4, 0x98, "RxFragments" }, 77 }; 78 79 #define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65) 80 81 /* BCM63xx MIB counters */ 82 static const struct b53_mib_desc b53_mibs_63xx[] = { 83 { 8, 0x00, "TxOctets" }, 84 { 4, 0x08, "TxDropPkts" }, 85 { 4, 0x0c, "TxQoSPkts" }, 86 { 4, 0x10, "TxBroadcastPkts" }, 87 { 4, 0x14, "TxMulticastPkts" }, 88 { 4, 0x18, "TxUnicastPkts" }, 89 { 4, 0x1c, "TxCollisions" }, 90 { 4, 0x20, "TxSingleCollision" }, 91 { 4, 0x24, "TxMultipleCollision" }, 92 { 4, 0x28, "TxDeferredTransmit" }, 93 { 4, 0x2c, "TxLateCollision" }, 94 { 4, 0x30, "TxExcessiveCollision" }, 95 { 4, 0x38, "TxPausePkts" }, 96 { 8, 0x3c, "TxQoSOctets" }, 97 { 8, 0x44, "RxOctets" }, 98 { 4, 0x4c, "RxUndersizePkts" }, 99 { 4, 0x50, "RxPausePkts" }, 100 { 4, 0x54, "Pkts64Octets" }, 101 { 4, 0x58, "Pkts65to127Octets" }, 102 { 4, 0x5c, "Pkts128to255Octets" }, 103 { 4, 0x60, "Pkts256to511Octets" }, 104 { 4, 0x64, "Pkts512to1023Octets" }, 105 { 4, 0x68, "Pkts1024to1522Octets" }, 106 { 4, 0x6c, "RxOversizePkts" }, 107 { 4, 0x70, "RxJabbers" }, 108 { 4, 0x74, "RxAlignmentErrors" }, 109 { 4, 0x78, "RxFCSErrors" }, 110 { 8, 0x7c, "RxGoodOctets" }, 111 { 4, 0x84, "RxDropPkts" }, 112 { 4, 0x88, "RxUnicastPkts" }, 113 { 4, 0x8c, "RxMulticastPkts" }, 114 { 4, 0x90, "RxBroadcastPkts" }, 115 { 4, 0x94, "RxSAChanges" }, 116 { 4, 0x98, "RxFragments" }, 117 { 4, 0xa0, "RxSymbolErrors" }, 118 { 4, 0xa4, "RxQoSPkts" }, 
119 { 8, 0xa8, "RxQoSOctets" }, 120 { 4, 0xb0, "Pkts1523to2047Octets" }, 121 { 4, 0xb4, "Pkts2048to4095Octets" }, 122 { 4, 0xb8, "Pkts4096to8191Octets" }, 123 { 4, 0xbc, "Pkts8192to9728Octets" }, 124 { 4, 0xc0, "RxDiscarded" }, 125 }; 126 127 #define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx) 128 129 /* MIB counters */ 130 static const struct b53_mib_desc b53_mibs[] = { 131 { 8, 0x00, "TxOctets" }, 132 { 4, 0x08, "TxDropPkts" }, 133 { 4, 0x10, "TxBroadcastPkts" }, 134 { 4, 0x14, "TxMulticastPkts" }, 135 { 4, 0x18, "TxUnicastPkts" }, 136 { 4, 0x1c, "TxCollisions" }, 137 { 4, 0x20, "TxSingleCollision" }, 138 { 4, 0x24, "TxMultipleCollision" }, 139 { 4, 0x28, "TxDeferredTransmit" }, 140 { 4, 0x2c, "TxLateCollision" }, 141 { 4, 0x30, "TxExcessiveCollision" }, 142 { 4, 0x38, "TxPausePkts" }, 143 { 8, 0x50, "RxOctets" }, 144 { 4, 0x58, "RxUndersizePkts" }, 145 { 4, 0x5c, "RxPausePkts" }, 146 { 4, 0x60, "Pkts64Octets" }, 147 { 4, 0x64, "Pkts65to127Octets" }, 148 { 4, 0x68, "Pkts128to255Octets" }, 149 { 4, 0x6c, "Pkts256to511Octets" }, 150 { 4, 0x70, "Pkts512to1023Octets" }, 151 { 4, 0x74, "Pkts1024to1522Octets" }, 152 { 4, 0x78, "RxOversizePkts" }, 153 { 4, 0x7c, "RxJabbers" }, 154 { 4, 0x80, "RxAlignmentErrors" }, 155 { 4, 0x84, "RxFCSErrors" }, 156 { 8, 0x88, "RxGoodOctets" }, 157 { 4, 0x90, "RxDropPkts" }, 158 { 4, 0x94, "RxUnicastPkts" }, 159 { 4, 0x98, "RxMulticastPkts" }, 160 { 4, 0x9c, "RxBroadcastPkts" }, 161 { 4, 0xa0, "RxSAChanges" }, 162 { 4, 0xa4, "RxFragments" }, 163 { 4, 0xa8, "RxJumboPkts" }, 164 { 4, 0xac, "RxSymbolErrors" }, 165 { 4, 0xc0, "RxDiscarded" }, 166 }; 167 168 #define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs) 169 170 static const struct b53_mib_desc b53_mibs_58xx[] = { 171 { 8, 0x00, "TxOctets" }, 172 { 4, 0x08, "TxDropPkts" }, 173 { 4, 0x0c, "TxQPKTQ0" }, 174 { 4, 0x10, "TxBroadcastPkts" }, 175 { 4, 0x14, "TxMulticastPkts" }, 176 { 4, 0x18, "TxUnicastPKts" }, 177 { 4, 0x1c, "TxCollisions" }, 178 { 4, 0x20, "TxSingleCollision" }, 179 { 4, 0x24, 
"TxMultipleCollision" }, 180 { 4, 0x28, "TxDeferredCollision" }, 181 { 4, 0x2c, "TxLateCollision" }, 182 { 4, 0x30, "TxExcessiveCollision" }, 183 { 4, 0x34, "TxFrameInDisc" }, 184 { 4, 0x38, "TxPausePkts" }, 185 { 4, 0x3c, "TxQPKTQ1" }, 186 { 4, 0x40, "TxQPKTQ2" }, 187 { 4, 0x44, "TxQPKTQ3" }, 188 { 4, 0x48, "TxQPKTQ4" }, 189 { 4, 0x4c, "TxQPKTQ5" }, 190 { 8, 0x50, "RxOctets" }, 191 { 4, 0x58, "RxUndersizePkts" }, 192 { 4, 0x5c, "RxPausePkts" }, 193 { 4, 0x60, "RxPkts64Octets" }, 194 { 4, 0x64, "RxPkts65to127Octets" }, 195 { 4, 0x68, "RxPkts128to255Octets" }, 196 { 4, 0x6c, "RxPkts256to511Octets" }, 197 { 4, 0x70, "RxPkts512to1023Octets" }, 198 { 4, 0x74, "RxPkts1024toMaxPktsOctets" }, 199 { 4, 0x78, "RxOversizePkts" }, 200 { 4, 0x7c, "RxJabbers" }, 201 { 4, 0x80, "RxAlignmentErrors" }, 202 { 4, 0x84, "RxFCSErrors" }, 203 { 8, 0x88, "RxGoodOctets" }, 204 { 4, 0x90, "RxDropPkts" }, 205 { 4, 0x94, "RxUnicastPkts" }, 206 { 4, 0x98, "RxMulticastPkts" }, 207 { 4, 0x9c, "RxBroadcastPkts" }, 208 { 4, 0xa0, "RxSAChanges" }, 209 { 4, 0xa4, "RxFragments" }, 210 { 4, 0xa8, "RxJumboPkt" }, 211 { 4, 0xac, "RxSymblErr" }, 212 { 4, 0xb0, "InRangeErrCount" }, 213 { 4, 0xb4, "OutRangeErrCount" }, 214 { 4, 0xb8, "EEELpiEvent" }, 215 { 4, 0xbc, "EEELpiDuration" }, 216 { 4, 0xc0, "RxDiscard" }, 217 { 4, 0xc8, "TxQPKTQ6" }, 218 { 4, 0xcc, "TxQPKTQ7" }, 219 { 4, 0xd0, "TxPkts64Octets" }, 220 { 4, 0xd4, "TxPkts65to127Octets" }, 221 { 4, 0xd8, "TxPkts128to255Octets" }, 222 { 4, 0xdc, "TxPkts256to511Ocets" }, 223 { 4, 0xe0, "TxPkts512to1023Ocets" }, 224 { 4, 0xe4, "TxPkts1024toMaxPktOcets" }, 225 }; 226 227 #define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx) 228 229 static int b53_do_vlan_op(struct b53_device *dev, u8 op) 230 { 231 unsigned int i; 232 233 b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op); 234 235 for (i = 0; i < 10; i++) { 236 u8 vta; 237 238 b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta); 239 if (!(vta & VTA_START_CMD)) 240 return 0; 241 242 
usleep_range(100, 200); 243 } 244 245 return -EIO; 246 } 247 248 static void b53_set_vlan_entry(struct b53_device *dev, u16 vid, 249 struct b53_vlan *vlan) 250 { 251 if (is5325(dev)) { 252 u32 entry = 0; 253 254 if (vlan->members) { 255 entry = ((vlan->untag & VA_UNTAG_MASK_25) << 256 VA_UNTAG_S_25) | vlan->members; 257 if (dev->core_rev >= 3) 258 entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S; 259 else 260 entry |= VA_VALID_25; 261 } 262 263 b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry); 264 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | 265 VTA_RW_STATE_WR | VTA_RW_OP_EN); 266 } else if (is5365(dev)) { 267 u16 entry = 0; 268 269 if (vlan->members) 270 entry = ((vlan->untag & VA_UNTAG_MASK_65) << 271 VA_UNTAG_S_65) | vlan->members | VA_VALID_65; 272 273 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry); 274 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | 275 VTA_RW_STATE_WR | VTA_RW_OP_EN); 276 } else { 277 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); 278 b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], 279 (vlan->untag << VTE_UNTAG_S) | vlan->members); 280 281 b53_do_vlan_op(dev, VTA_CMD_WRITE); 282 } 283 284 dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n", 285 vid, vlan->members, vlan->untag); 286 } 287 288 static void b53_get_vlan_entry(struct b53_device *dev, u16 vid, 289 struct b53_vlan *vlan) 290 { 291 if (is5325(dev)) { 292 u32 entry = 0; 293 294 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | 295 VTA_RW_STATE_RD | VTA_RW_OP_EN); 296 b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry); 297 298 if (dev->core_rev >= 3) 299 vlan->valid = !!(entry & VA_VALID_25_R4); 300 else 301 vlan->valid = !!(entry & VA_VALID_25); 302 vlan->members = entry & VA_MEMBER_MASK; 303 vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25; 304 305 } else if (is5365(dev)) { 306 u16 entry = 0; 307 308 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | 309 
VTA_RW_STATE_WR | VTA_RW_OP_EN); 310 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry); 311 312 vlan->valid = !!(entry & VA_VALID_65); 313 vlan->members = entry & VA_MEMBER_MASK; 314 vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65; 315 } else { 316 u32 entry = 0; 317 318 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); 319 b53_do_vlan_op(dev, VTA_CMD_READ); 320 b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry); 321 vlan->members = entry & VTE_MEMBERS; 322 vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS; 323 vlan->valid = true; 324 } 325 } 326 327 static void b53_set_forwarding(struct b53_device *dev, int enable) 328 { 329 u8 mgmt; 330 331 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 332 333 if (enable) 334 mgmt |= SM_SW_FWD_EN; 335 else 336 mgmt &= ~SM_SW_FWD_EN; 337 338 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 339 } 340 341 static void b53_enable_vlan(struct b53_device *dev, bool enable) 342 { 343 u8 mgmt, vc0, vc1, vc4 = 0, vc5; 344 345 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 346 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0); 347 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1); 348 349 if (is5325(dev) || is5365(dev)) { 350 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4); 351 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5); 352 } else if (is63xx(dev)) { 353 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4); 354 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5); 355 } else { 356 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4); 357 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5); 358 } 359 360 mgmt &= ~SM_SW_FWD_MODE; 361 362 if (enable) { 363 vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; 364 vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; 365 vc4 &= ~VC4_ING_VID_CHECK_MASK; 366 vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; 367 vc5 |= VC5_DROP_VTABLE_MISS; 368 369 if (is5325(dev)) 370 vc0 &= ~VC0_RESERVED_1; 371 372 if 
(is5325(dev) || is5365(dev)) 373 vc1 |= VC1_RX_MCST_TAG_EN; 374 375 } else { 376 vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID); 377 vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN); 378 vc4 &= ~VC4_ING_VID_CHECK_MASK; 379 vc5 &= ~VC5_DROP_VTABLE_MISS; 380 381 if (is5325(dev) || is5365(dev)) 382 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; 383 else 384 vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S; 385 386 if (is5325(dev) || is5365(dev)) 387 vc1 &= ~VC1_RX_MCST_TAG_EN; 388 } 389 390 if (!is5325(dev) && !is5365(dev)) 391 vc5 &= ~VC5_VID_FFF_EN; 392 393 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0); 394 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1); 395 396 if (is5325(dev) || is5365(dev)) { 397 /* enable the high 8 bit vid check on 5325 */ 398 if (is5325(dev) && enable) 399 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 400 VC3_HIGH_8BIT_EN); 401 else 402 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); 403 404 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4); 405 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5); 406 } else if (is63xx(dev)) { 407 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0); 408 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4); 409 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5); 410 } else { 411 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); 412 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4); 413 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5); 414 } 415 416 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 417 } 418 419 static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) 420 { 421 u32 port_mask = 0; 422 u16 max_size = JMS_MIN_SIZE; 423 424 if (is5325(dev) || is5365(dev)) 425 return -EINVAL; 426 427 if (enable) { 428 port_mask = dev->enabled_ports; 429 max_size = JMS_MAX_SIZE; 430 if (allow_10_100) 431 port_mask |= JPM_10_100_JUMBO_EN; 432 } 433 434 b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask); 435 return 
b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size);
}

/* Trigger a fast-age (ARL flush) cycle for the entry class selected by
 * @mask and poll for completion.
 *
 * Returns 0 on success or -ETIMEDOUT if the switch never clears
 * FAST_AGE_DONE within ~10ms.
 */
static int b53_flush_arl(struct b53_device *dev, u8 mask)
{
	unsigned int i;

	/* FAST_AGE_DONE doubles as the start/busy bit: setting it kicks off
	 * the cycle, hardware clears it when the flush has finished.
	 */
	b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
		   FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);

	for (i = 0; i < 10; i++) {
		u8 fast_age_ctrl;

		b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
			  &fast_age_ctrl);

		if (!(fast_age_ctrl & FAST_AGE_DONE))
			goto out;

		msleep(1);
	}

	return -ETIMEDOUT;
out:
	/* Only age dynamic entries (default behavior) */
	b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC);
	return 0;
}

/* Flush all dynamic ARL entries learned on @port */
static int b53_fast_age_port(struct b53_device *dev, int port)
{
	b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);

	return b53_flush_arl(dev, FAST_AGE_PORT);
}

/* Flush all dynamic ARL entries learned for VLAN @vid */
static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
{
	b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);

	return b53_flush_arl(dev, FAST_AGE_VLAN);
}

static void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
{
	struct b53_device *dev = ds_to_priv(ds);
	unsigned int i;
	u16 pvlan;

	/* Enable the IMP port to be in the same VLAN as the other ports
	 * on a per-port basis such that we only have Port i and IMP in
	 * the same VLAN.
	 */
	b53_for_each_port(dev, i) {
		b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan);
		pvlan |= BIT(cpu_port);
		b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan);
	}
}

static int b53_enable_port(struct dsa_switch *ds, int port,
			   struct phy_device *phy)
{
	struct b53_device *dev = ds_to_priv(ds);
	unsigned int cpu_port = dev->cpu_port;
	u16 pvlan;

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);

	/* Set this port, and only this one to be in the default VLAN,
	 * if member of a bridge, restore its membership prior to
	 * bringing down this port.
	 */
	b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
	/* NOTE(review): ~0x1ff clears a 9-bit port mask — assumes at most
	 * 9 ports in the per-port VLAN map; confirm for larger variants.
	 */
	pvlan &= ~0x1ff;
	pvlan |= BIT(port);
	pvlan |= dev->ports[port].vlan_ctl_mask;
	b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);

	b53_imp_vlan_setup(ds, cpu_port);

	return 0;
}

static void b53_disable_port(struct dsa_switch *ds, int port,
			     struct phy_device *phy)
{
	struct b53_device *dev = ds_to_priv(ds);
	u8 reg;

	/* Disable Tx/Rx for the port */
	b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
	reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
}

static void b53_enable_cpu_port(struct b53_device *dev)
{
	unsigned int cpu_port = dev->cpu_port;
	u8 port_ctrl;

	/* BCM5325 CPU port is at 8 */
	if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25)
		cpu_port = B53_CPU_PORT;

	/* Accept broadcast, multicast and unicast frames on the CPU port */
	port_ctrl = PORT_CTRL_RX_BCST_EN |
		    PORT_CTRL_RX_MCST_EN |
		    PORT_CTRL_RX_UCST_EN;
	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(cpu_port), port_ctrl);
}

static void b53_enable_mib(struct b53_device *dev)
{
	u8 gc;

	b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
	gc
&= ~(GC_RESET_MIB | GC_MIB_AC_EN); 553 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); 554 } 555 556 static int b53_configure_vlan(struct b53_device *dev) 557 { 558 struct b53_vlan vl = { 0 }; 559 int i; 560 561 /* clear all vlan entries */ 562 if (is5325(dev) || is5365(dev)) { 563 for (i = 1; i < dev->num_vlans; i++) 564 b53_set_vlan_entry(dev, i, &vl); 565 } else { 566 b53_do_vlan_op(dev, VTA_CMD_CLEAR); 567 } 568 569 b53_enable_vlan(dev, false); 570 571 b53_for_each_port(dev, i) 572 b53_write16(dev, B53_VLAN_PAGE, 573 B53_VLAN_PORT_DEF_TAG(i), 1); 574 575 if (!is5325(dev) && !is5365(dev)) 576 b53_set_jumbo(dev, dev->enable_jumbo, false); 577 578 return 0; 579 } 580 581 static void b53_switch_reset_gpio(struct b53_device *dev) 582 { 583 int gpio = dev->reset_gpio; 584 585 if (gpio < 0) 586 return; 587 588 /* Reset sequence: RESET low(50ms)->high(20ms) 589 */ 590 gpio_set_value(gpio, 0); 591 mdelay(50); 592 593 gpio_set_value(gpio, 1); 594 mdelay(20); 595 596 dev->current_page = 0xff; 597 } 598 599 static int b53_switch_reset(struct b53_device *dev) 600 { 601 u8 mgmt; 602 603 b53_switch_reset_gpio(dev); 604 605 if (is539x(dev)) { 606 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83); 607 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00); 608 } 609 610 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 611 612 if (!(mgmt & SM_SW_FWD_EN)) { 613 mgmt &= ~SM_SW_FWD_MODE; 614 mgmt |= SM_SW_FWD_EN; 615 616 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 617 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 618 619 if (!(mgmt & SM_SW_FWD_EN)) { 620 dev_err(dev->dev, "Failed to enable switch!\n"); 621 return -EINVAL; 622 } 623 } 624 625 b53_enable_mib(dev); 626 627 return b53_flush_arl(dev, FAST_AGE_STATIC); 628 } 629 630 static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg) 631 { 632 struct b53_device *priv = ds_to_priv(ds); 633 u16 value = 0; 634 int ret; 635 636 if (priv->ops->phy_read16) 637 ret = priv->ops->phy_read16(priv, 
addr, reg, &value); 638 else 639 ret = b53_read16(priv, B53_PORT_MII_PAGE(addr), 640 reg * 2, &value); 641 642 return ret ? ret : value; 643 } 644 645 static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val) 646 { 647 struct b53_device *priv = ds_to_priv(ds); 648 649 if (priv->ops->phy_write16) 650 return priv->ops->phy_write16(priv, addr, reg, val); 651 652 return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val); 653 } 654 655 static int b53_reset_switch(struct b53_device *priv) 656 { 657 /* reset vlans */ 658 priv->enable_jumbo = false; 659 660 memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans); 661 memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports); 662 663 return b53_switch_reset(priv); 664 } 665 666 static int b53_apply_config(struct b53_device *priv) 667 { 668 /* disable switching */ 669 b53_set_forwarding(priv, 0); 670 671 b53_configure_vlan(priv); 672 673 /* enable switching */ 674 b53_set_forwarding(priv, 1); 675 676 return 0; 677 } 678 679 static void b53_reset_mib(struct b53_device *priv) 680 { 681 u8 gc; 682 683 b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); 684 685 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB); 686 msleep(1); 687 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB); 688 msleep(1); 689 } 690 691 static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev) 692 { 693 if (is5365(dev)) 694 return b53_mibs_65; 695 else if (is63xx(dev)) 696 return b53_mibs_63xx; 697 else if (is58xx(dev)) 698 return b53_mibs_58xx; 699 else 700 return b53_mibs; 701 } 702 703 static unsigned int b53_get_mib_size(struct b53_device *dev) 704 { 705 if (is5365(dev)) 706 return B53_MIBS_65_SIZE; 707 else if (is63xx(dev)) 708 return B53_MIBS_63XX_SIZE; 709 else if (is58xx(dev)) 710 return B53_MIBS_58XX_SIZE; 711 else 712 return B53_MIBS_SIZE; 713 } 714 715 static void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data) 716 { 717 struct 
b53_device *dev = ds_to_priv(ds); 718 const struct b53_mib_desc *mibs = b53_get_mib(dev); 719 unsigned int mib_size = b53_get_mib_size(dev); 720 unsigned int i; 721 722 for (i = 0; i < mib_size; i++) 723 memcpy(data + i * ETH_GSTRING_LEN, 724 mibs[i].name, ETH_GSTRING_LEN); 725 } 726 727 static void b53_get_ethtool_stats(struct dsa_switch *ds, int port, 728 uint64_t *data) 729 { 730 struct b53_device *dev = ds_to_priv(ds); 731 const struct b53_mib_desc *mibs = b53_get_mib(dev); 732 unsigned int mib_size = b53_get_mib_size(dev); 733 const struct b53_mib_desc *s; 734 unsigned int i; 735 u64 val = 0; 736 737 if (is5365(dev) && port == 5) 738 port = 8; 739 740 mutex_lock(&dev->stats_mutex); 741 742 for (i = 0; i < mib_size; i++) { 743 s = &mibs[i]; 744 745 if (s->size == 8) { 746 b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val); 747 } else { 748 u32 val32; 749 750 b53_read32(dev, B53_MIB_PAGE(port), s->offset, 751 &val32); 752 val = val32; 753 } 754 data[i] = (u64)val; 755 } 756 757 mutex_unlock(&dev->stats_mutex); 758 } 759 760 static int b53_get_sset_count(struct dsa_switch *ds) 761 { 762 struct b53_device *dev = ds_to_priv(ds); 763 764 return b53_get_mib_size(dev); 765 } 766 767 static int b53_set_addr(struct dsa_switch *ds, u8 *addr) 768 { 769 return 0; 770 } 771 772 static int b53_setup(struct dsa_switch *ds) 773 { 774 struct b53_device *dev = ds_to_priv(ds); 775 unsigned int port; 776 int ret; 777 778 ret = b53_reset_switch(dev); 779 if (ret) { 780 dev_err(ds->dev, "failed to reset switch\n"); 781 return ret; 782 } 783 784 b53_reset_mib(dev); 785 786 ret = b53_apply_config(dev); 787 if (ret) 788 dev_err(ds->dev, "failed to apply configuration\n"); 789 790 for (port = 0; port < dev->num_ports; port++) { 791 if (BIT(port) & ds->enabled_port_mask) 792 b53_enable_port(ds, port, NULL); 793 else if (dsa_is_cpu_port(ds, port)) 794 b53_enable_cpu_port(dev); 795 else 796 b53_disable_port(ds, port, NULL); 797 } 798 799 return ret; 800 } 801 802 static void 
b53_adjust_link(struct dsa_switch *ds, int port, 803 struct phy_device *phydev) 804 { 805 struct b53_device *dev = ds_to_priv(ds); 806 u8 rgmii_ctrl = 0, reg = 0, off; 807 808 if (!phy_is_pseudo_fixed_link(phydev)) 809 return; 810 811 /* Override the port settings */ 812 if (port == dev->cpu_port) { 813 off = B53_PORT_OVERRIDE_CTRL; 814 reg = PORT_OVERRIDE_EN; 815 } else { 816 off = B53_GMII_PORT_OVERRIDE_CTRL(port); 817 reg = GMII_PO_EN; 818 } 819 820 /* Set the link UP */ 821 if (phydev->link) 822 reg |= PORT_OVERRIDE_LINK; 823 824 if (phydev->duplex == DUPLEX_FULL) 825 reg |= PORT_OVERRIDE_FULL_DUPLEX; 826 827 switch (phydev->speed) { 828 case 2000: 829 reg |= PORT_OVERRIDE_SPEED_2000M; 830 /* fallthrough */ 831 case SPEED_1000: 832 reg |= PORT_OVERRIDE_SPEED_1000M; 833 break; 834 case SPEED_100: 835 reg |= PORT_OVERRIDE_SPEED_100M; 836 break; 837 case SPEED_10: 838 reg |= PORT_OVERRIDE_SPEED_10M; 839 break; 840 default: 841 dev_err(ds->dev, "unknown speed: %d\n", phydev->speed); 842 return; 843 } 844 845 /* Enable flow control on BCM5301x's CPU port */ 846 if (is5301x(dev) && port == dev->cpu_port) 847 reg |= PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW; 848 849 if (phydev->pause) { 850 if (phydev->asym_pause) 851 reg |= PORT_OVERRIDE_TX_FLOW; 852 reg |= PORT_OVERRIDE_RX_FLOW; 853 } 854 855 b53_write8(dev, B53_CTRL_PAGE, off, reg); 856 857 if (is531x5(dev) && phy_interface_is_rgmii(phydev)) { 858 if (port == 8) 859 off = B53_RGMII_CTRL_IMP; 860 else 861 off = B53_RGMII_CTRL_P(port); 862 863 /* Configure the port RGMII clock delay by DLL disabled and 864 * tx_clk aligned timing (restoring to reset defaults) 865 */ 866 b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl); 867 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC | 868 RGMII_CTRL_TIMING_SEL); 869 870 /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make 871 * sure that we enable the port TX clock internal delay to 872 * account for this internal delay that is inserted, otherwise 873 * the 
switch won't be able to receive correctly. 874 * 875 * PHY_INTERFACE_MODE_RGMII means that we are not introducing 876 * any delay neither on transmission nor reception, so the 877 * BCM53125 must also be configured accordingly to account for 878 * the lack of delay and introduce 879 * 880 * The BCM53125 switch has its RX clock and TX clock control 881 * swapped, hence the reason why we modify the TX clock path in 882 * the "RGMII" case 883 */ 884 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) 885 rgmii_ctrl |= RGMII_CTRL_DLL_TXC; 886 if (phydev->interface == PHY_INTERFACE_MODE_RGMII) 887 rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC; 888 rgmii_ctrl |= RGMII_CTRL_TIMING_SEL; 889 b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl); 890 891 dev_info(ds->dev, "Configured port %d for %s\n", port, 892 phy_modes(phydev->interface)); 893 } 894 895 /* configure MII port if necessary */ 896 if (is5325(dev)) { 897 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 898 ®); 899 900 /* reverse mii needs to be enabled */ 901 if (!(reg & PORT_OVERRIDE_RV_MII_25)) { 902 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 903 reg | PORT_OVERRIDE_RV_MII_25); 904 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 905 ®); 906 907 if (!(reg & PORT_OVERRIDE_RV_MII_25)) { 908 dev_err(ds->dev, 909 "Failed to enable reverse MII mode\n"); 910 return; 911 } 912 } 913 } else if (is5301x(dev)) { 914 if (port != dev->cpu_port) { 915 u8 po_reg = B53_GMII_PORT_OVERRIDE_CTRL(dev->cpu_port); 916 u8 gmii_po; 917 918 b53_read8(dev, B53_CTRL_PAGE, po_reg, &gmii_po); 919 gmii_po |= GMII_PO_LINK | 920 GMII_PO_RX_FLOW | 921 GMII_PO_TX_FLOW | 922 GMII_PO_EN | 923 GMII_PO_SPEED_2000M; 924 b53_write8(dev, B53_CTRL_PAGE, po_reg, gmii_po); 925 } 926 } 927 } 928 929 static int b53_vlan_filtering(struct dsa_switch *ds, int port, 930 bool vlan_filtering) 931 { 932 return 0; 933 } 934 935 static int b53_vlan_prepare(struct dsa_switch *ds, int port, 936 const struct switchdev_obj_port_vlan 
*vlan, 937 struct switchdev_trans *trans) 938 { 939 struct b53_device *dev = ds_to_priv(ds); 940 941 if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0) 942 return -EOPNOTSUPP; 943 944 if (vlan->vid_end > dev->num_vlans) 945 return -ERANGE; 946 947 b53_enable_vlan(dev, true); 948 949 return 0; 950 } 951 952 static void b53_vlan_add(struct dsa_switch *ds, int port, 953 const struct switchdev_obj_port_vlan *vlan, 954 struct switchdev_trans *trans) 955 { 956 struct b53_device *dev = ds_to_priv(ds); 957 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 958 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; 959 unsigned int cpu_port = dev->cpu_port; 960 struct b53_vlan *vl; 961 u16 vid; 962 963 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { 964 vl = &dev->vlans[vid]; 965 966 b53_get_vlan_entry(dev, vid, vl); 967 968 vl->members |= BIT(port) | BIT(cpu_port); 969 if (untagged) 970 vl->untag |= BIT(port) | BIT(cpu_port); 971 else 972 vl->untag &= ~(BIT(port) | BIT(cpu_port)); 973 974 b53_set_vlan_entry(dev, vid, vl); 975 b53_fast_age_vlan(dev, vid); 976 } 977 978 if (pvid) { 979 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), 980 vlan->vid_end); 981 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), 982 vlan->vid_end); 983 b53_fast_age_vlan(dev, vid); 984 } 985 } 986 987 static int b53_vlan_del(struct dsa_switch *ds, int port, 988 const struct switchdev_obj_port_vlan *vlan) 989 { 990 struct b53_device *dev = ds_to_priv(ds); 991 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 992 unsigned int cpu_port = dev->cpu_port; 993 struct b53_vlan *vl; 994 u16 vid; 995 u16 pvid; 996 997 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); 998 999 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { 1000 vl = &dev->vlans[vid]; 1001 1002 b53_get_vlan_entry(dev, vid, vl); 1003 1004 vl->members &= ~BIT(port); 1005 if ((vl->members & BIT(cpu_port)) == BIT(cpu_port)) 1006 vl->members = 0; 1007 1008 if (pvid == 
vid) { 1009 if (is5325(dev) || is5365(dev)) 1010 pvid = 1; 1011 else 1012 pvid = 0; 1013 } 1014 1015 if (untagged) { 1016 vl->untag &= ~(BIT(port)); 1017 if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port)) 1018 vl->untag = 0; 1019 } 1020 1021 b53_set_vlan_entry(dev, vid, vl); 1022 b53_fast_age_vlan(dev, vid); 1023 } 1024 1025 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid); 1026 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid); 1027 b53_fast_age_vlan(dev, pvid); 1028 1029 return 0; 1030 } 1031 1032 static int b53_vlan_dump(struct dsa_switch *ds, int port, 1033 struct switchdev_obj_port_vlan *vlan, 1034 int (*cb)(struct switchdev_obj *obj)) 1035 { 1036 struct b53_device *dev = ds_to_priv(ds); 1037 u16 vid, vid_start = 0, pvid; 1038 struct b53_vlan *vl; 1039 int err = 0; 1040 1041 if (is5325(dev) || is5365(dev)) 1042 vid_start = 1; 1043 1044 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); 1045 1046 /* Use our software cache for dumps, since we do not have any HW 1047 * operation returning only the used/valid VLANs 1048 */ 1049 for (vid = vid_start; vid < dev->num_vlans; vid++) { 1050 vl = &dev->vlans[vid]; 1051 1052 if (!vl->valid) 1053 continue; 1054 1055 if (!(vl->members & BIT(port))) 1056 continue; 1057 1058 vlan->vid_begin = vlan->vid_end = vid; 1059 vlan->flags = 0; 1060 1061 if (vl->untag & BIT(port)) 1062 vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; 1063 if (pvid == vid) 1064 vlan->flags |= BRIDGE_VLAN_INFO_PVID; 1065 1066 err = cb(&vlan->obj); 1067 if (err) 1068 break; 1069 } 1070 1071 return err; 1072 } 1073 1074 /* Address Resolution Logic routines */ 1075 static int b53_arl_op_wait(struct b53_device *dev) 1076 { 1077 unsigned int timeout = 10; 1078 u8 reg; 1079 1080 do { 1081 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); 1082 if (!(reg & ARLTBL_START_DONE)) 1083 return 0; 1084 1085 usleep_range(1000, 2000); 1086 } while (timeout--); 1087 1088 dev_warn(dev->dev, "timeout waiting for ARL to 
finish: 0x%02x\n", reg); 1089 1090 return -ETIMEDOUT; 1091 } 1092 1093 static int b53_arl_rw_op(struct b53_device *dev, unsigned int op) 1094 { 1095 u8 reg; 1096 1097 if (op > ARLTBL_RW) 1098 return -EINVAL; 1099 1100 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); 1101 reg |= ARLTBL_START_DONE; 1102 if (op) 1103 reg |= ARLTBL_RW; 1104 else 1105 reg &= ~ARLTBL_RW; 1106 b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg); 1107 1108 return b53_arl_op_wait(dev); 1109 } 1110 1111 static int b53_arl_read(struct b53_device *dev, u64 mac, 1112 u16 vid, struct b53_arl_entry *ent, u8 *idx, 1113 bool is_valid) 1114 { 1115 unsigned int i; 1116 int ret; 1117 1118 ret = b53_arl_op_wait(dev); 1119 if (ret) 1120 return ret; 1121 1122 /* Read the bins */ 1123 for (i = 0; i < dev->num_arl_entries; i++) { 1124 u64 mac_vid; 1125 u32 fwd_entry; 1126 1127 b53_read64(dev, B53_ARLIO_PAGE, 1128 B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid); 1129 b53_read32(dev, B53_ARLIO_PAGE, 1130 B53_ARLTBL_DATA_ENTRY(i), &fwd_entry); 1131 b53_arl_to_entry(ent, mac_vid, fwd_entry); 1132 1133 if (!(fwd_entry & ARLTBL_VALID)) 1134 continue; 1135 if ((mac_vid & ARLTBL_MAC_MASK) != mac) 1136 continue; 1137 *idx = i; 1138 } 1139 1140 return -ENOENT; 1141 } 1142 1143 static int b53_arl_op(struct b53_device *dev, int op, int port, 1144 const unsigned char *addr, u16 vid, bool is_valid) 1145 { 1146 struct b53_arl_entry ent; 1147 u32 fwd_entry; 1148 u64 mac, mac_vid = 0; 1149 u8 idx = 0; 1150 int ret; 1151 1152 /* Convert the array into a 64-bit MAC */ 1153 mac = b53_mac_to_u64(addr); 1154 1155 /* Perform a read for the given MAC and VID */ 1156 b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac); 1157 b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); 1158 1159 /* Issue a read operation for this MAC */ 1160 ret = b53_arl_rw_op(dev, 1); 1161 if (ret) 1162 return ret; 1163 1164 ret = b53_arl_read(dev, mac, vid, &ent, &idx, is_valid); 1165 /* If this is a read, just finish now */ 1166 if (op) 1167 
return ret; 1168 1169 /* We could not find a matching MAC, so reset to a new entry */ 1170 if (ret) { 1171 fwd_entry = 0; 1172 idx = 1; 1173 } 1174 1175 memset(&ent, 0, sizeof(ent)); 1176 ent.port = port; 1177 ent.is_valid = is_valid; 1178 ent.vid = vid; 1179 ent.is_static = true; 1180 memcpy(ent.mac, addr, ETH_ALEN); 1181 b53_arl_from_entry(&mac_vid, &fwd_entry, &ent); 1182 1183 b53_write64(dev, B53_ARLIO_PAGE, 1184 B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid); 1185 b53_write32(dev, B53_ARLIO_PAGE, 1186 B53_ARLTBL_DATA_ENTRY(idx), fwd_entry); 1187 1188 return b53_arl_rw_op(dev, 0); 1189 } 1190 1191 static int b53_fdb_prepare(struct dsa_switch *ds, int port, 1192 const struct switchdev_obj_port_fdb *fdb, 1193 struct switchdev_trans *trans) 1194 { 1195 struct b53_device *priv = ds_to_priv(ds); 1196 1197 /* 5325 and 5365 require some more massaging, but could 1198 * be supported eventually 1199 */ 1200 if (is5325(priv) || is5365(priv)) 1201 return -EOPNOTSUPP; 1202 1203 return 0; 1204 } 1205 1206 static void b53_fdb_add(struct dsa_switch *ds, int port, 1207 const struct switchdev_obj_port_fdb *fdb, 1208 struct switchdev_trans *trans) 1209 { 1210 struct b53_device *priv = ds_to_priv(ds); 1211 1212 if (b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, true)) 1213 pr_err("%s: failed to add MAC address\n", __func__); 1214 } 1215 1216 static int b53_fdb_del(struct dsa_switch *ds, int port, 1217 const struct switchdev_obj_port_fdb *fdb) 1218 { 1219 struct b53_device *priv = ds_to_priv(ds); 1220 1221 return b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, false); 1222 } 1223 1224 static int b53_arl_search_wait(struct b53_device *dev) 1225 { 1226 unsigned int timeout = 1000; 1227 u8 reg; 1228 1229 do { 1230 b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, ®); 1231 if (!(reg & ARL_SRCH_STDN)) 1232 return 0; 1233 1234 if (reg & ARL_SRCH_VLID) 1235 return 0; 1236 1237 usleep_range(1000, 2000); 1238 } while (timeout--); 1239 1240 return -ETIMEDOUT; 1241 } 1242 1243 static void 
b53_arl_search_rd(struct b53_device *dev, u8 idx, 1244 struct b53_arl_entry *ent) 1245 { 1246 u64 mac_vid; 1247 u32 fwd_entry; 1248 1249 b53_read64(dev, B53_ARLIO_PAGE, 1250 B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid); 1251 b53_read32(dev, B53_ARLIO_PAGE, 1252 B53_ARL_SRCH_RSTL(idx), &fwd_entry); 1253 b53_arl_to_entry(ent, mac_vid, fwd_entry); 1254 } 1255 1256 static int b53_fdb_copy(struct net_device *dev, int port, 1257 const struct b53_arl_entry *ent, 1258 struct switchdev_obj_port_fdb *fdb, 1259 int (*cb)(struct switchdev_obj *obj)) 1260 { 1261 if (!ent->is_valid) 1262 return 0; 1263 1264 if (port != ent->port) 1265 return 0; 1266 1267 ether_addr_copy(fdb->addr, ent->mac); 1268 fdb->vid = ent->vid; 1269 fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE; 1270 1271 return cb(&fdb->obj); 1272 } 1273 1274 static int b53_fdb_dump(struct dsa_switch *ds, int port, 1275 struct switchdev_obj_port_fdb *fdb, 1276 int (*cb)(struct switchdev_obj *obj)) 1277 { 1278 struct b53_device *priv = ds_to_priv(ds); 1279 struct net_device *dev = ds->ports[port].netdev; 1280 struct b53_arl_entry results[2]; 1281 unsigned int count = 0; 1282 int ret; 1283 u8 reg; 1284 1285 /* Start search operation */ 1286 reg = ARL_SRCH_STDN; 1287 b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg); 1288 1289 do { 1290 ret = b53_arl_search_wait(priv); 1291 if (ret) 1292 return ret; 1293 1294 b53_arl_search_rd(priv, 0, &results[0]); 1295 ret = b53_fdb_copy(dev, port, &results[0], fdb, cb); 1296 if (ret) 1297 return ret; 1298 1299 if (priv->num_arl_entries > 2) { 1300 b53_arl_search_rd(priv, 1, &results[1]); 1301 ret = b53_fdb_copy(dev, port, &results[1], fdb, cb); 1302 if (ret) 1303 return ret; 1304 1305 if (!results[0].is_valid && !results[1].is_valid) 1306 break; 1307 } 1308 1309 } while (count++ < 1024); 1310 1311 return 0; 1312 } 1313 1314 static int b53_br_join(struct dsa_switch *ds, int port, 1315 struct net_device *bridge) 1316 { 1317 struct b53_device *dev = ds_to_priv(ds); 1318 u16 
pvlan, reg; 1319 unsigned int i; 1320 1321 dev->ports[port].bridge_dev = bridge; 1322 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 1323 1324 b53_for_each_port(dev, i) { 1325 if (dev->ports[i].bridge_dev != bridge) 1326 continue; 1327 1328 /* Add this local port to the remote port VLAN control 1329 * membership and update the remote port bitmask 1330 */ 1331 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); 1332 reg |= BIT(port); 1333 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); 1334 dev->ports[i].vlan_ctl_mask = reg; 1335 1336 pvlan |= BIT(i); 1337 } 1338 1339 /* Configure the local port VLAN control membership to include 1340 * remote ports and update the local port bitmask 1341 */ 1342 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 1343 dev->ports[port].vlan_ctl_mask = pvlan; 1344 1345 return 0; 1346 } 1347 1348 static void b53_br_leave(struct dsa_switch *ds, int port) 1349 { 1350 struct b53_device *dev = ds_to_priv(ds); 1351 struct net_device *bridge = dev->ports[port].bridge_dev; 1352 struct b53_vlan *vl = &dev->vlans[0]; 1353 unsigned int i; 1354 u16 pvlan, reg, pvid; 1355 1356 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 1357 1358 b53_for_each_port(dev, i) { 1359 /* Don't touch the remaining ports */ 1360 if (dev->ports[i].bridge_dev != bridge) 1361 continue; 1362 1363 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); 1364 reg &= ~BIT(port); 1365 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); 1366 dev->ports[port].vlan_ctl_mask = reg; 1367 1368 /* Prevent self removal to preserve isolation */ 1369 if (port != i) 1370 pvlan &= ~BIT(i); 1371 } 1372 1373 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 1374 dev->ports[port].vlan_ctl_mask = pvlan; 1375 dev->ports[port].bridge_dev = NULL; 1376 1377 if (is5325(dev) || is5365(dev)) 1378 pvid = 1; 1379 else 1380 pvid = 0; 1381 1382 b53_get_vlan_entry(dev, pvid, vl); 1383 
vl->members |= BIT(port) | BIT(dev->cpu_port); 1384 vl->untag |= BIT(port) | BIT(dev->cpu_port); 1385 b53_set_vlan_entry(dev, pvid, vl); 1386 } 1387 1388 static void b53_br_set_stp_state(struct dsa_switch *ds, int port, 1389 u8 state) 1390 { 1391 struct b53_device *dev = ds_to_priv(ds); 1392 u8 hw_state, cur_hw_state; 1393 u8 reg; 1394 1395 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), ®); 1396 cur_hw_state = reg & PORT_CTRL_STP_STATE_MASK; 1397 1398 switch (state) { 1399 case BR_STATE_DISABLED: 1400 hw_state = PORT_CTRL_DIS_STATE; 1401 break; 1402 case BR_STATE_LISTENING: 1403 hw_state = PORT_CTRL_LISTEN_STATE; 1404 break; 1405 case BR_STATE_LEARNING: 1406 hw_state = PORT_CTRL_LEARN_STATE; 1407 break; 1408 case BR_STATE_FORWARDING: 1409 hw_state = PORT_CTRL_FWD_STATE; 1410 break; 1411 case BR_STATE_BLOCKING: 1412 hw_state = PORT_CTRL_BLOCK_STATE; 1413 break; 1414 default: 1415 dev_err(ds->dev, "invalid STP state: %d\n", state); 1416 return; 1417 } 1418 1419 /* Fast-age ARL entries if we are moving a port from Learning or 1420 * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening 1421 * state (hw_state) 1422 */ 1423 if (cur_hw_state != hw_state) { 1424 if (cur_hw_state >= PORT_CTRL_LEARN_STATE && 1425 hw_state <= PORT_CTRL_LISTEN_STATE) { 1426 if (b53_fast_age_port(dev, port)) { 1427 dev_err(ds->dev, "fast ageing failed\n"); 1428 return; 1429 } 1430 } 1431 } 1432 1433 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), ®); 1434 reg &= ~PORT_CTRL_STP_STATE_MASK; 1435 reg |= hw_state; 1436 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); 1437 } 1438 1439 static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds) 1440 { 1441 return DSA_TAG_PROTO_NONE; 1442 } 1443 1444 static struct dsa_switch_ops b53_switch_ops = { 1445 .get_tag_protocol = b53_get_tag_protocol, 1446 .setup = b53_setup, 1447 .set_addr = b53_set_addr, 1448 .get_strings = b53_get_strings, 1449 .get_ethtool_stats = b53_get_ethtool_stats, 1450 .get_sset_count = 
b53_get_sset_count, 1451 .phy_read = b53_phy_read16, 1452 .phy_write = b53_phy_write16, 1453 .adjust_link = b53_adjust_link, 1454 .port_enable = b53_enable_port, 1455 .port_disable = b53_disable_port, 1456 .port_bridge_join = b53_br_join, 1457 .port_bridge_leave = b53_br_leave, 1458 .port_stp_state_set = b53_br_set_stp_state, 1459 .port_vlan_filtering = b53_vlan_filtering, 1460 .port_vlan_prepare = b53_vlan_prepare, 1461 .port_vlan_add = b53_vlan_add, 1462 .port_vlan_del = b53_vlan_del, 1463 .port_vlan_dump = b53_vlan_dump, 1464 .port_fdb_prepare = b53_fdb_prepare, 1465 .port_fdb_dump = b53_fdb_dump, 1466 .port_fdb_add = b53_fdb_add, 1467 .port_fdb_del = b53_fdb_del, 1468 }; 1469 1470 struct b53_chip_data { 1471 u32 chip_id; 1472 const char *dev_name; 1473 u16 vlans; 1474 u16 enabled_ports; 1475 u8 cpu_port; 1476 u8 vta_regs[3]; 1477 u8 arl_entries; 1478 u8 duplex_reg; 1479 u8 jumbo_pm_reg; 1480 u8 jumbo_size_reg; 1481 }; 1482 1483 #define B53_VTA_REGS \ 1484 { B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY } 1485 #define B53_VTA_REGS_9798 \ 1486 { B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 } 1487 #define B53_VTA_REGS_63XX \ 1488 { B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX } 1489 1490 static const struct b53_chip_data b53_switch_chips[] = { 1491 { 1492 .chip_id = BCM5325_DEVICE_ID, 1493 .dev_name = "BCM5325", 1494 .vlans = 16, 1495 .enabled_ports = 0x1f, 1496 .arl_entries = 2, 1497 .cpu_port = B53_CPU_PORT_25, 1498 .duplex_reg = B53_DUPLEX_STAT_FE, 1499 }, 1500 { 1501 .chip_id = BCM5365_DEVICE_ID, 1502 .dev_name = "BCM5365", 1503 .vlans = 256, 1504 .enabled_ports = 0x1f, 1505 .arl_entries = 2, 1506 .cpu_port = B53_CPU_PORT_25, 1507 .duplex_reg = B53_DUPLEX_STAT_FE, 1508 }, 1509 { 1510 .chip_id = BCM5395_DEVICE_ID, 1511 .dev_name = "BCM5395", 1512 .vlans = 4096, 1513 .enabled_ports = 0x1f, 1514 .arl_entries = 4, 1515 .cpu_port = B53_CPU_PORT, 1516 .vta_regs = B53_VTA_REGS, 1517 .duplex_reg = B53_DUPLEX_STAT_GE, 1518 .jumbo_pm_reg = 
B53_JUMBO_PORT_MASK, 1519 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 1520 }, 1521 { 1522 .chip_id = BCM5397_DEVICE_ID, 1523 .dev_name = "BCM5397", 1524 .vlans = 4096, 1525 .enabled_ports = 0x1f, 1526 .arl_entries = 4, 1527 .cpu_port = B53_CPU_PORT, 1528 .vta_regs = B53_VTA_REGS_9798, 1529 .duplex_reg = B53_DUPLEX_STAT_GE, 1530 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 1531 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 1532 }, 1533 { 1534 .chip_id = BCM5398_DEVICE_ID, 1535 .dev_name = "BCM5398", 1536 .vlans = 4096, 1537 .enabled_ports = 0x7f, 1538 .arl_entries = 4, 1539 .cpu_port = B53_CPU_PORT, 1540 .vta_regs = B53_VTA_REGS_9798, 1541 .duplex_reg = B53_DUPLEX_STAT_GE, 1542 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 1543 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 1544 }, 1545 { 1546 .chip_id = BCM53115_DEVICE_ID, 1547 .dev_name = "BCM53115", 1548 .vlans = 4096, 1549 .enabled_ports = 0x1f, 1550 .arl_entries = 4, 1551 .vta_regs = B53_VTA_REGS, 1552 .cpu_port = B53_CPU_PORT, 1553 .duplex_reg = B53_DUPLEX_STAT_GE, 1554 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 1555 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 1556 }, 1557 { 1558 .chip_id = BCM53125_DEVICE_ID, 1559 .dev_name = "BCM53125", 1560 .vlans = 4096, 1561 .enabled_ports = 0xff, 1562 .cpu_port = B53_CPU_PORT, 1563 .vta_regs = B53_VTA_REGS, 1564 .duplex_reg = B53_DUPLEX_STAT_GE, 1565 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 1566 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 1567 }, 1568 { 1569 .chip_id = BCM53128_DEVICE_ID, 1570 .dev_name = "BCM53128", 1571 .vlans = 4096, 1572 .enabled_ports = 0x1ff, 1573 .arl_entries = 4, 1574 .cpu_port = B53_CPU_PORT, 1575 .vta_regs = B53_VTA_REGS, 1576 .duplex_reg = B53_DUPLEX_STAT_GE, 1577 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 1578 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 1579 }, 1580 { 1581 .chip_id = BCM63XX_DEVICE_ID, 1582 .dev_name = "BCM63xx", 1583 .vlans = 4096, 1584 .enabled_ports = 0, /* pdata must provide them */ 1585 .arl_entries = 4, 1586 .cpu_port = B53_CPU_PORT, 1587 .vta_regs = B53_VTA_REGS_63XX, 1588 .duplex_reg = 
B53_DUPLEX_STAT_63XX, 1589 .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, 1590 .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX, 1591 }, 1592 { 1593 .chip_id = BCM53010_DEVICE_ID, 1594 .dev_name = "BCM53010", 1595 .vlans = 4096, 1596 .enabled_ports = 0x1f, 1597 .arl_entries = 4, 1598 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ 1599 .vta_regs = B53_VTA_REGS, 1600 .duplex_reg = B53_DUPLEX_STAT_GE, 1601 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 1602 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 1603 }, 1604 { 1605 .chip_id = BCM53011_DEVICE_ID, 1606 .dev_name = "BCM53011", 1607 .vlans = 4096, 1608 .enabled_ports = 0x1bf, 1609 .arl_entries = 4, 1610 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ 1611 .vta_regs = B53_VTA_REGS, 1612 .duplex_reg = B53_DUPLEX_STAT_GE, 1613 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 1614 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 1615 }, 1616 { 1617 .chip_id = BCM53012_DEVICE_ID, 1618 .dev_name = "BCM53012", 1619 .vlans = 4096, 1620 .enabled_ports = 0x1bf, 1621 .arl_entries = 4, 1622 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ 1623 .vta_regs = B53_VTA_REGS, 1624 .duplex_reg = B53_DUPLEX_STAT_GE, 1625 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 1626 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 1627 }, 1628 { 1629 .chip_id = BCM53018_DEVICE_ID, 1630 .dev_name = "BCM53018", 1631 .vlans = 4096, 1632 .enabled_ports = 0x1f, 1633 .arl_entries = 4, 1634 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ 1635 .vta_regs = B53_VTA_REGS, 1636 .duplex_reg = B53_DUPLEX_STAT_GE, 1637 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 1638 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 1639 }, 1640 { 1641 .chip_id = BCM53019_DEVICE_ID, 1642 .dev_name = "BCM53019", 1643 .vlans = 4096, 1644 .enabled_ports = 0x1f, 1645 .arl_entries = 4, 1646 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ 1647 .vta_regs = B53_VTA_REGS, 1648 .duplex_reg = B53_DUPLEX_STAT_GE, 1649 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 1650 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 1651 }, 1652 { 1653 .chip_id = BCM58XX_DEVICE_ID, 1654 
.dev_name = "BCM585xx/586xx/88312", 1655 .vlans = 4096, 1656 .enabled_ports = 0x1ff, 1657 .arl_entries = 4, 1658 .cpu_port = B53_CPU_PORT_25, 1659 .vta_regs = B53_VTA_REGS, 1660 .duplex_reg = B53_DUPLEX_STAT_GE, 1661 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 1662 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 1663 }, 1664 { 1665 .chip_id = BCM7445_DEVICE_ID, 1666 .dev_name = "BCM7445", 1667 .vlans = 4096, 1668 .enabled_ports = 0x1ff, 1669 .arl_entries = 4, 1670 .cpu_port = B53_CPU_PORT, 1671 .vta_regs = B53_VTA_REGS, 1672 .duplex_reg = B53_DUPLEX_STAT_GE, 1673 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 1674 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 1675 }, 1676 }; 1677 1678 static int b53_switch_init(struct b53_device *dev) 1679 { 1680 unsigned int i; 1681 int ret; 1682 1683 for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) { 1684 const struct b53_chip_data *chip = &b53_switch_chips[i]; 1685 1686 if (chip->chip_id == dev->chip_id) { 1687 if (!dev->enabled_ports) 1688 dev->enabled_ports = chip->enabled_ports; 1689 dev->name = chip->dev_name; 1690 dev->duplex_reg = chip->duplex_reg; 1691 dev->vta_regs[0] = chip->vta_regs[0]; 1692 dev->vta_regs[1] = chip->vta_regs[1]; 1693 dev->vta_regs[2] = chip->vta_regs[2]; 1694 dev->jumbo_pm_reg = chip->jumbo_pm_reg; 1695 dev->cpu_port = chip->cpu_port; 1696 dev->num_vlans = chip->vlans; 1697 dev->num_arl_entries = chip->arl_entries; 1698 break; 1699 } 1700 } 1701 1702 /* check which BCM5325x version we have */ 1703 if (is5325(dev)) { 1704 u8 vc4; 1705 1706 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4); 1707 1708 /* check reserved bits */ 1709 switch (vc4 & 3) { 1710 case 1: 1711 /* BCM5325E */ 1712 break; 1713 case 3: 1714 /* BCM5325F - do not use port 4 */ 1715 dev->enabled_ports &= ~BIT(4); 1716 break; 1717 default: 1718 /* On the BCM47XX SoCs this is the supported internal switch.*/ 1719 #ifndef CONFIG_BCM47XX 1720 /* BCM5325M */ 1721 return -EINVAL; 1722 #else 1723 break; 1724 #endif 1725 } 1726 } else if (dev->chip_id == 
BCM53115_DEVICE_ID) { 1727 u64 strap_value; 1728 1729 b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value); 1730 /* use second IMP port if GMII is enabled */ 1731 if (strap_value & SV_GMII_CTRL_115) 1732 dev->cpu_port = 5; 1733 } 1734 1735 /* cpu port is always last */ 1736 dev->num_ports = dev->cpu_port + 1; 1737 dev->enabled_ports |= BIT(dev->cpu_port); 1738 1739 dev->ports = devm_kzalloc(dev->dev, 1740 sizeof(struct b53_port) * dev->num_ports, 1741 GFP_KERNEL); 1742 if (!dev->ports) 1743 return -ENOMEM; 1744 1745 dev->vlans = devm_kzalloc(dev->dev, 1746 sizeof(struct b53_vlan) * dev->num_vlans, 1747 GFP_KERNEL); 1748 if (!dev->vlans) 1749 return -ENOMEM; 1750 1751 dev->reset_gpio = b53_switch_get_reset_gpio(dev); 1752 if (dev->reset_gpio >= 0) { 1753 ret = devm_gpio_request_one(dev->dev, dev->reset_gpio, 1754 GPIOF_OUT_INIT_HIGH, "robo_reset"); 1755 if (ret) 1756 return ret; 1757 } 1758 1759 return 0; 1760 } 1761 1762 struct b53_device *b53_switch_alloc(struct device *base, 1763 const struct b53_io_ops *ops, 1764 void *priv) 1765 { 1766 struct dsa_switch *ds; 1767 struct b53_device *dev; 1768 1769 ds = devm_kzalloc(base, sizeof(*ds) + sizeof(*dev), GFP_KERNEL); 1770 if (!ds) 1771 return NULL; 1772 1773 dev = (struct b53_device *)(ds + 1); 1774 1775 ds->priv = dev; 1776 ds->dev = base; 1777 dev->dev = base; 1778 1779 dev->ds = ds; 1780 dev->priv = priv; 1781 dev->ops = ops; 1782 ds->ops = &b53_switch_ops; 1783 mutex_init(&dev->reg_mutex); 1784 mutex_init(&dev->stats_mutex); 1785 1786 return dev; 1787 } 1788 EXPORT_SYMBOL(b53_switch_alloc); 1789 1790 int b53_switch_detect(struct b53_device *dev) 1791 { 1792 u32 id32; 1793 u16 tmp; 1794 u8 id8; 1795 int ret; 1796 1797 ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8); 1798 if (ret) 1799 return ret; 1800 1801 switch (id8) { 1802 case 0: 1803 /* BCM5325 and BCM5365 do not have this register so reads 1804 * return 0. But the read operation did succeed, so assume this 1805 * is one of them. 
1806 * 1807 * Next check if we can write to the 5325's VTA register; for 1808 * 5365 it is read only. 1809 */ 1810 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf); 1811 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp); 1812 1813 if (tmp == 0xf) 1814 dev->chip_id = BCM5325_DEVICE_ID; 1815 else 1816 dev->chip_id = BCM5365_DEVICE_ID; 1817 break; 1818 case BCM5395_DEVICE_ID: 1819 case BCM5397_DEVICE_ID: 1820 case BCM5398_DEVICE_ID: 1821 dev->chip_id = id8; 1822 break; 1823 default: 1824 ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32); 1825 if (ret) 1826 return ret; 1827 1828 switch (id32) { 1829 case BCM53115_DEVICE_ID: 1830 case BCM53125_DEVICE_ID: 1831 case BCM53128_DEVICE_ID: 1832 case BCM53010_DEVICE_ID: 1833 case BCM53011_DEVICE_ID: 1834 case BCM53012_DEVICE_ID: 1835 case BCM53018_DEVICE_ID: 1836 case BCM53019_DEVICE_ID: 1837 dev->chip_id = id32; 1838 break; 1839 default: 1840 pr_err("unsupported switch detected (BCM53%02x/BCM%x)\n", 1841 id8, id32); 1842 return -ENODEV; 1843 } 1844 } 1845 1846 if (dev->chip_id == BCM5325_DEVICE_ID) 1847 return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25, 1848 &dev->core_rev); 1849 else 1850 return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID, 1851 &dev->core_rev); 1852 } 1853 EXPORT_SYMBOL(b53_switch_detect); 1854 1855 int b53_switch_register(struct b53_device *dev) 1856 { 1857 int ret; 1858 1859 if (dev->pdata) { 1860 dev->chip_id = dev->pdata->chip_id; 1861 dev->enabled_ports = dev->pdata->enabled_ports; 1862 } 1863 1864 if (!dev->chip_id && b53_switch_detect(dev)) 1865 return -EINVAL; 1866 1867 ret = b53_switch_init(dev); 1868 if (ret) 1869 return ret; 1870 1871 pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev); 1872 1873 return dsa_register_switch(dev->ds, dev->ds->dev->of_node); 1874 } 1875 EXPORT_SYMBOL(b53_switch_register); 1876 1877 MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>"); 1878 MODULE_DESCRIPTION("B53 switch library"); 1879 MODULE_LICENSE("Dual BSD/GPL"); 1880