// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Microsemi Ocelot Switch driver
 *
 * Copyright (c) 2017 Microsemi Corporation
 */
#include <linux/dsa/ocelot.h>
#include <linux/if_bridge.h>
#include <linux/ptp_classify.h>
#include <soc/mscc/ocelot_vcap.h>
#include "ocelot.h"
#include "ocelot_vcap.h"

/* Polling interval and timeout used while waiting for the MAC and VLAN
 * table command registers to return to the IDLE state.
 */
#define TABLE_UPDATE_SLEEP_US 10
#define TABLE_UPDATE_TIMEOUT_US 100000

/* Software image of one MAC table entry, as read back from hardware by
 * ocelot_mact_read().
 */
struct ocelot_mact_entry {
	u8 mac[ETH_ALEN];
	u16 vid;
	enum macaccess_entry_type type;
};

/* Read the MAC table command/status register.
 * Caller must hold &ocelot->mact_lock.
 */
static inline u32 ocelot_mact_read_macaccess(struct ocelot *ocelot)
{
	return ocelot_read(ocelot, ANA_TABLES_MACACCESS);
}

/* Poll until the previously issued MAC table command has completed (the
 * command field returns to MACACCESS_CMD_IDLE), or time out.
 * Caller must hold &ocelot->mact_lock.
 */
static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot)
{
	u32 val;

	return readx_poll_timeout(ocelot_mact_read_macaccess,
				  ocelot, val,
				  (val & ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M) ==
				  MACACCESS_CMD_IDLE,
				  TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
}

/* Load the {MAC, VID} key for the next MAC table command into the
 * MACLDATA/MACHDATA data registers.
 * Caller must hold &ocelot->mact_lock.
 */
static void ocelot_mact_select(struct ocelot *ocelot,
			       const unsigned char mac[ETH_ALEN],
			       unsigned int vid)
{
	u32 macl = 0, mach = 0;

	/* Set the MAC address to handle and the vlan associated in a format
	 * understood by the hardware.
	 */
	mach |= vid << 16;
	mach |= mac[0] << 8;
	mach |= mac[1] << 0;
	macl |= mac[2] << 24;
	macl |= mac[3] << 16;
	macl |= mac[4] << 8;
	macl |= mac[5] << 0;

	ocelot_write(ocelot, macl, ANA_TABLES_MACLDATA);
	ocelot_write(ocelot, mach, ANA_TABLES_MACHDATA);
}

/* Install a static (or typed) entry in the MAC table, pointing the
 * {mac, vid} key at destination index @port.
 * Returns 0 on success or a negative error if the LEARN command times out.
 */
int ocelot_mact_learn(struct ocelot *ocelot, int port,
		      const unsigned char mac[ETH_ALEN],
		      unsigned int vid, enum macaccess_entry_type type)
{
	u32 cmd = ANA_TABLES_MACACCESS_VALID |
		ANA_TABLES_MACACCESS_DEST_IDX(port) |
		ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
		ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
	unsigned int mc_ports;
	int err;

	/* Set MAC_CPU_COPY if the CPU port is used by a multicast entry.
	 * For MACv4/MACv6 entry types the low MAC bytes encode the
	 * destination port mask, so extract it and test the CPU port bit
	 * (bit num_phys_ports).
	 */
	if (type == ENTRYTYPE_MACv4)
		mc_ports = (mac[1] << 8) | mac[2];
	else if (type == ENTRYTYPE_MACv6)
		mc_ports = (mac[0] << 8) | mac[1];
	else
		mc_ports = 0;

	if (mc_ports & BIT(ocelot->num_phys_ports))
		cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;

	mutex_lock(&ocelot->mact_lock);

	ocelot_mact_select(ocelot, mac, vid);

	/* Issue a write command */
	ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);

	err = ocelot_mact_wait_for_completion(ocelot);

	mutex_unlock(&ocelot->mact_lock);

	return err;
}
EXPORT_SYMBOL(ocelot_mact_learn);

/* Remove the MAC table entry keyed by {mac, vid}, if present.
 * Returns 0 on success or a negative error if the FORGET command times out.
 */
int ocelot_mact_forget(struct ocelot *ocelot,
		       const unsigned char mac[ETH_ALEN], unsigned int vid)
{
	int err;

	mutex_lock(&ocelot->mact_lock);

	ocelot_mact_select(ocelot, mac, vid);

	/* Issue a forget command */
	ocelot_write(ocelot,
		     ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_FORGET),
		     ANA_TABLES_MACACCESS);

	err = ocelot_mact_wait_for_completion(ocelot);

	mutex_unlock(&ocelot->mact_lock);

	return err;
}
EXPORT_SYMBOL(ocelot_mact_forget);

/* One-time MAC table setup done at switch initialization. */
static void ocelot_mact_init(struct ocelot *ocelot)
{
	/* Configure the learning mode entries attributes:
	 * - Do not copy the frame to the CPU extraction queues.
	 * - Use the VLAN and MAC copy flags for the DMAC lookup
	 *   (i.e. clear the "ignore" bits below).
	 */
	ocelot_rmw(ocelot, 0,
		   ANA_AGENCTRL_LEARN_CPU_COPY | ANA_AGENCTRL_IGNORE_DMAC_FLAGS
		   | ANA_AGENCTRL_LEARN_FWD_KILL
		   | ANA_AGENCTRL_LEARN_IGNORE_VLAN,
		   ANA_AGENCTRL);

	/* Clear the MAC table. We are not concurrent with anyone, so
	 * holding &ocelot->mact_lock is pointless.
	 */
	ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
}

/* Enable the VCAP (TCAM) lookups - IS1, IS2 and ES0 - on @port. */
static void ocelot_vcap_enable(struct ocelot *ocelot, int port)
{
	ocelot_write_gix(ocelot, ANA_PORT_VCAP_S2_CFG_S2_ENA |
			 ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(0xa),
			 ANA_PORT_VCAP_S2_CFG, port);

	ocelot_write_gix(ocelot, ANA_PORT_VCAP_CFG_S1_ENA,
			 ANA_PORT_VCAP_CFG, port);

	ocelot_rmw_gix(ocelot, REW_PORT_CFG_ES0_EN,
		       REW_PORT_CFG_ES0_EN,
		       REW_PORT_CFG, port);
}

/* Read the VLAN table command/status register. */
static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot)
{
	return ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
}

/* Poll until the previously issued VLAN table command has completed, or
 * time out.
 */
static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
{
	u32 val;

	return readx_poll_timeout(ocelot_vlant_read_vlanaccess,
				  ocelot,
				  val,
				  (val & ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M) ==
				  ANA_TABLES_VLANACCESS_CMD_IDLE,
				  TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
}

/* Program @mask as the port member mask of VLAN @vid in hardware. */
static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
{
	/* Select the VID to configure */
	ocelot_write(ocelot, ANA_TABLES_VLANTIDX_V_INDEX(vid),
		     ANA_TABLES_VLANTIDX);
	/* Set the vlan port members mask and issue a write command */
	ocelot_write(ocelot, ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(mask) |
		     ANA_TABLES_VLANACCESS_CMD_WRITE,
		     ANA_TABLES_VLANACCESS);

	return ocelot_vlant_wait_for_completion(ocelot);
}

/* Count the bridge VLANs of which @port is a member and which it egresses
 * untagged.
 */
static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port)
{
	struct ocelot_bridge_vlan *vlan;
	int num_untagged = 0;

	list_for_each_entry(vlan, &ocelot->vlans, list) {
		if (!(vlan->portmask & BIT(port)))
			continue;

		if (vlan->untagged & BIT(port))
			num_untagged++;
	}

	return num_untagged;
}

/* Count the bridge VLANs of which @port is a member and which it egresses
 * tagged.
 */
static int ocelot_port_num_tagged_vlans(struct ocelot *ocelot, int port)
{
	struct ocelot_bridge_vlan *vlan;
	int num_tagged = 0;

	list_for_each_entry(vlan, &ocelot->vlans, list) {
		if (!(vlan->portmask & BIT(port)))
			continue;

		if (!(vlan->untagged & BIT(port)))
			num_tagged++;
	}

	return num_tagged;
}

/* We use native VLAN when we have to mix egress-tagged VLANs with exactly
 * _one_ egress-untagged VLAN (_the_ native VLAN)
 */
static bool ocelot_port_uses_native_vlan(struct ocelot *ocelot, int port)
{
	return ocelot_port_num_tagged_vlans(ocelot, port) &&
	       ocelot_port_num_untagged_vlans(ocelot, port) == 1;
}

/* Return the first bridge VLAN of which @port is an egress-untagged member,
 * i.e. the port's native VLAN, or NULL if there is none.
 */
static struct ocelot_bridge_vlan *
ocelot_port_find_native_vlan(struct ocelot *ocelot, int port)
{
	struct ocelot_bridge_vlan *vlan;

	list_for_each_entry(vlan, &ocelot->vlans, list)
		if (vlan->portmask & BIT(port) && vlan->untagged & BIT(port))
			return vlan;

	return NULL;
}

/* Keep in sync REW_TAG_CFG_TAG_CFG and, if applicable,
 * REW_PORT_VLAN_CFG_PORT_VID, with the bridge VLAN table and VLAN awareness
 * state of the port.
241 */ 242 static void ocelot_port_manage_port_tag(struct ocelot *ocelot, int port) 243 { 244 struct ocelot_port *ocelot_port = ocelot->ports[port]; 245 enum ocelot_port_tag_config tag_cfg; 246 bool uses_native_vlan = false; 247 248 if (ocelot_port->vlan_aware) { 249 uses_native_vlan = ocelot_port_uses_native_vlan(ocelot, port); 250 251 if (uses_native_vlan) 252 tag_cfg = OCELOT_PORT_TAG_NATIVE; 253 else if (ocelot_port_num_untagged_vlans(ocelot, port)) 254 tag_cfg = OCELOT_PORT_TAG_DISABLED; 255 else 256 tag_cfg = OCELOT_PORT_TAG_TRUNK; 257 } else { 258 tag_cfg = OCELOT_PORT_TAG_DISABLED; 259 } 260 261 ocelot_rmw_gix(ocelot, REW_TAG_CFG_TAG_CFG(tag_cfg), 262 REW_TAG_CFG_TAG_CFG_M, 263 REW_TAG_CFG, port); 264 265 if (uses_native_vlan) { 266 struct ocelot_bridge_vlan *native_vlan; 267 268 /* Not having a native VLAN is impossible, because 269 * ocelot_port_num_untagged_vlans has returned 1. 270 * So there is no use in checking for NULL here. 271 */ 272 native_vlan = ocelot_port_find_native_vlan(ocelot, port); 273 274 ocelot_rmw_gix(ocelot, 275 REW_PORT_VLAN_CFG_PORT_VID(native_vlan->vid), 276 REW_PORT_VLAN_CFG_PORT_VID_M, 277 REW_PORT_VLAN_CFG, port); 278 } 279 } 280 281 /* Default vlan to clasify for untagged frames (may be zero) */ 282 static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, 283 const struct ocelot_bridge_vlan *pvid_vlan) 284 { 285 struct ocelot_port *ocelot_port = ocelot->ports[port]; 286 u16 pvid = OCELOT_VLAN_UNAWARE_PVID; 287 u32 val = 0; 288 289 ocelot_port->pvid_vlan = pvid_vlan; 290 291 if (ocelot_port->vlan_aware && pvid_vlan) 292 pvid = pvid_vlan->vid; 293 294 ocelot_rmw_gix(ocelot, 295 ANA_PORT_VLAN_CFG_VLAN_VID(pvid), 296 ANA_PORT_VLAN_CFG_VLAN_VID_M, 297 ANA_PORT_VLAN_CFG, port); 298 299 /* If there's no pvid, we should drop not only untagged traffic (which 300 * happens automatically), but also 802.1p traffic which gets 301 * classified to VLAN 0, but that is always in our RX filter, so it 302 * would get accepted were it not 
for this setting. 303 */ 304 if (!pvid_vlan && ocelot_port->vlan_aware) 305 val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | 306 ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA; 307 308 ocelot_rmw_gix(ocelot, val, 309 ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | 310 ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, 311 ANA_PORT_DROP_CFG, port); 312 } 313 314 static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot, 315 u16 vid) 316 { 317 struct ocelot_bridge_vlan *vlan; 318 319 list_for_each_entry(vlan, &ocelot->vlans, list) 320 if (vlan->vid == vid) 321 return vlan; 322 323 return NULL; 324 } 325 326 static int ocelot_vlan_member_add(struct ocelot *ocelot, int port, u16 vid, 327 bool untagged) 328 { 329 struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid); 330 unsigned long portmask; 331 int err; 332 333 if (vlan) { 334 portmask = vlan->portmask | BIT(port); 335 336 err = ocelot_vlant_set_mask(ocelot, vid, portmask); 337 if (err) 338 return err; 339 340 vlan->portmask = portmask; 341 /* Bridge VLANs can be overwritten with a different 342 * egress-tagging setting, so make sure to override an untagged 343 * with a tagged VID if that's going on. 
344 */ 345 if (untagged) 346 vlan->untagged |= BIT(port); 347 else 348 vlan->untagged &= ~BIT(port); 349 350 return 0; 351 } 352 353 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 354 if (!vlan) 355 return -ENOMEM; 356 357 portmask = BIT(port); 358 359 err = ocelot_vlant_set_mask(ocelot, vid, portmask); 360 if (err) { 361 kfree(vlan); 362 return err; 363 } 364 365 vlan->vid = vid; 366 vlan->portmask = portmask; 367 if (untagged) 368 vlan->untagged = BIT(port); 369 INIT_LIST_HEAD(&vlan->list); 370 list_add_tail(&vlan->list, &ocelot->vlans); 371 372 return 0; 373 } 374 375 static int ocelot_vlan_member_del(struct ocelot *ocelot, int port, u16 vid) 376 { 377 struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid); 378 unsigned long portmask; 379 int err; 380 381 if (!vlan) 382 return 0; 383 384 portmask = vlan->portmask & ~BIT(port); 385 386 err = ocelot_vlant_set_mask(ocelot, vid, portmask); 387 if (err) 388 return err; 389 390 vlan->portmask = portmask; 391 if (vlan->portmask) 392 return 0; 393 394 list_del(&vlan->list); 395 kfree(vlan); 396 397 return 0; 398 } 399 400 int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, 401 bool vlan_aware, struct netlink_ext_ack *extack) 402 { 403 struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1]; 404 struct ocelot_port *ocelot_port = ocelot->ports[port]; 405 struct ocelot_vcap_filter *filter; 406 u32 val; 407 408 list_for_each_entry(filter, &block->rules, list) { 409 if (filter->ingress_port_mask & BIT(port) && 410 filter->action.vid_replace_ena) { 411 NL_SET_ERR_MSG_MOD(extack, 412 "Cannot change VLAN state with vlan modify rules active"); 413 return -EBUSY; 414 } 415 } 416 417 ocelot_port->vlan_aware = vlan_aware; 418 419 if (vlan_aware) 420 val = ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | 421 ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1); 422 else 423 val = 0; 424 ocelot_rmw_gix(ocelot, val, 425 ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | 426 ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M, 427 ANA_PORT_VLAN_CFG, port); 428 429 
ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan); 430 ocelot_port_manage_port_tag(ocelot, port); 431 432 return 0; 433 } 434 EXPORT_SYMBOL(ocelot_port_vlan_filtering); 435 436 int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid, 437 bool untagged, struct netlink_ext_ack *extack) 438 { 439 if (untagged) { 440 /* We are adding an egress-tagged VLAN */ 441 if (ocelot_port_uses_native_vlan(ocelot, port)) { 442 NL_SET_ERR_MSG_MOD(extack, 443 "Port with egress-tagged VLANs cannot have more than one egress-untagged (native) VLAN"); 444 return -EBUSY; 445 } 446 } else { 447 /* We are adding an egress-tagged VLAN */ 448 if (ocelot_port_num_untagged_vlans(ocelot, port) > 1) { 449 NL_SET_ERR_MSG_MOD(extack, 450 "Port with more than one egress-untagged VLAN cannot have egress-tagged VLANs"); 451 return -EBUSY; 452 } 453 } 454 455 return 0; 456 } 457 EXPORT_SYMBOL(ocelot_vlan_prepare); 458 459 int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, 460 bool untagged) 461 { 462 int err; 463 464 err = ocelot_vlan_member_add(ocelot, port, vid, untagged); 465 if (err) 466 return err; 467 468 /* Default ingress vlan classification */ 469 if (pvid) 470 ocelot_port_set_pvid(ocelot, port, 471 ocelot_bridge_vlan_find(ocelot, vid)); 472 473 /* Untagged egress vlan clasification */ 474 ocelot_port_manage_port_tag(ocelot, port); 475 476 return 0; 477 } 478 EXPORT_SYMBOL(ocelot_vlan_add); 479 480 int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid) 481 { 482 struct ocelot_port *ocelot_port = ocelot->ports[port]; 483 int err; 484 485 err = ocelot_vlan_member_del(ocelot, port, vid); 486 if (err) 487 return err; 488 489 /* Ingress */ 490 if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid) 491 ocelot_port_set_pvid(ocelot, port, NULL); 492 493 /* Egress */ 494 ocelot_port_manage_port_tag(ocelot, port); 495 496 return 0; 497 } 498 EXPORT_SYMBOL(ocelot_vlan_del); 499 500 static void ocelot_vlan_init(struct ocelot *ocelot) 
501 { 502 unsigned long all_ports = GENMASK(ocelot->num_phys_ports - 1, 0); 503 u16 port, vid; 504 505 /* Clear VLAN table, by default all ports are members of all VLANs */ 506 ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT, 507 ANA_TABLES_VLANACCESS); 508 ocelot_vlant_wait_for_completion(ocelot); 509 510 /* Configure the port VLAN memberships */ 511 for (vid = 1; vid < VLAN_N_VID; vid++) 512 ocelot_vlant_set_mask(ocelot, vid, 0); 513 514 /* Because VLAN filtering is enabled, we need VID 0 to get untagged 515 * traffic. It is added automatically if 8021q module is loaded, but 516 * we can't rely on it since module may be not loaded. 517 */ 518 ocelot_vlant_set_mask(ocelot, OCELOT_VLAN_UNAWARE_PVID, all_ports); 519 520 /* Set vlan ingress filter mask to all ports but the CPU port by 521 * default. 522 */ 523 ocelot_write(ocelot, all_ports, ANA_VLANMASK); 524 525 for (port = 0; port < ocelot->num_phys_ports; port++) { 526 ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port); 527 ocelot_write_gix(ocelot, 0, REW_TAG_CFG, port); 528 } 529 } 530 531 static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port) 532 { 533 return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port); 534 } 535 536 static int ocelot_port_flush(struct ocelot *ocelot, int port) 537 { 538 unsigned int pause_ena; 539 int err, val; 540 541 /* Disable dequeuing from the egress queues */ 542 ocelot_rmw_rix(ocelot, QSYS_PORT_MODE_DEQUEUE_DIS, 543 QSYS_PORT_MODE_DEQUEUE_DIS, 544 QSYS_PORT_MODE, port); 545 546 /* Disable flow control */ 547 ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena); 548 ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0); 549 550 /* Disable priority flow control */ 551 ocelot_fields_write(ocelot, port, 552 QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, 0); 553 554 /* Wait at least the time it takes to receive a frame of maximum length 555 * at the port. 
556 * Worst-case delays for 10 kilobyte jumbo frames are: 557 * 8 ms on a 10M port 558 * 800 μs on a 100M port 559 * 80 μs on a 1G port 560 * 32 μs on a 2.5G port 561 */ 562 usleep_range(8000, 10000); 563 564 /* Disable half duplex backpressure. */ 565 ocelot_rmw_rix(ocelot, 0, SYS_FRONT_PORT_MODE_HDX_MODE, 566 SYS_FRONT_PORT_MODE, port); 567 568 /* Flush the queues associated with the port. */ 569 ocelot_rmw_gix(ocelot, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG_FLUSH_ENA, 570 REW_PORT_CFG, port); 571 572 /* Enable dequeuing from the egress queues. */ 573 ocelot_rmw_rix(ocelot, 0, QSYS_PORT_MODE_DEQUEUE_DIS, QSYS_PORT_MODE, 574 port); 575 576 /* Wait until flushing is complete. */ 577 err = read_poll_timeout(ocelot_read_eq_avail, val, !val, 578 100, 2000000, false, ocelot, port); 579 580 /* Clear flushing again. */ 581 ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port); 582 583 /* Re-enable flow control */ 584 ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena); 585 586 return err; 587 } 588 589 void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port, 590 unsigned int link_an_mode, 591 phy_interface_t interface, 592 unsigned long quirks) 593 { 594 struct ocelot_port *ocelot_port = ocelot->ports[port]; 595 int err; 596 597 ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA, 598 DEV_MAC_ENA_CFG); 599 600 ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0); 601 602 err = ocelot_port_flush(ocelot, port); 603 if (err) 604 dev_err(ocelot->dev, "failed to flush port %d: %d\n", 605 port, err); 606 607 /* Put the port in reset. 
*/ 608 if (interface != PHY_INTERFACE_MODE_QSGMII || 609 !(quirks & OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP)) 610 ocelot_port_rmwl(ocelot_port, 611 DEV_CLOCK_CFG_MAC_TX_RST | 612 DEV_CLOCK_CFG_MAC_RX_RST, 613 DEV_CLOCK_CFG_MAC_TX_RST | 614 DEV_CLOCK_CFG_MAC_RX_RST, 615 DEV_CLOCK_CFG); 616 } 617 EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_down); 618 619 void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port, 620 struct phy_device *phydev, 621 unsigned int link_an_mode, 622 phy_interface_t interface, 623 int speed, int duplex, 624 bool tx_pause, bool rx_pause, 625 unsigned long quirks) 626 { 627 struct ocelot_port *ocelot_port = ocelot->ports[port]; 628 int mac_speed, mode = 0; 629 u32 mac_fc_cfg; 630 631 /* The MAC might be integrated in systems where the MAC speed is fixed 632 * and it's the PCS who is performing the rate adaptation, so we have 633 * to write "1000Mbps" into the LINK_SPEED field of DEV_CLOCK_CFG 634 * (which is also its default value). 635 */ 636 if ((quirks & OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION) || 637 speed == SPEED_1000) { 638 mac_speed = OCELOT_SPEED_1000; 639 mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA; 640 } else if (speed == SPEED_2500) { 641 mac_speed = OCELOT_SPEED_2500; 642 mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA; 643 } else if (speed == SPEED_100) { 644 mac_speed = OCELOT_SPEED_100; 645 } else { 646 mac_speed = OCELOT_SPEED_10; 647 } 648 649 if (duplex == DUPLEX_FULL) 650 mode |= DEV_MAC_MODE_CFG_FDX_ENA; 651 652 ocelot_port_writel(ocelot_port, mode, DEV_MAC_MODE_CFG); 653 654 /* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and 655 * PORT_RST bits in DEV_CLOCK_CFG. 
656 */ 657 ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(mac_speed), 658 DEV_CLOCK_CFG); 659 660 switch (speed) { 661 case SPEED_10: 662 mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_10); 663 break; 664 case SPEED_100: 665 mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_100); 666 break; 667 case SPEED_1000: 668 case SPEED_2500: 669 mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_1000); 670 break; 671 default: 672 dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n", 673 port, speed); 674 return; 675 } 676 677 /* Handle RX pause in all cases, with 2500base-X this is used for rate 678 * adaptation. 679 */ 680 mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA; 681 682 if (tx_pause) 683 mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA | 684 SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) | 685 SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) | 686 SYS_MAC_FC_CFG_ZERO_PAUSE_ENA; 687 688 /* Flow control. Link speed is only used here to evaluate the time 689 * specification in incoming pause frames. 690 */ 691 ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port); 692 693 ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port); 694 695 ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, tx_pause); 696 697 /* Undo the effects of ocelot_phylink_mac_link_down: 698 * enable MAC module 699 */ 700 ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA | 701 DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG); 702 703 /* Core: Enable port for frame transfer */ 704 ocelot_fields_write(ocelot, port, 705 QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); 706 } 707 EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up); 708 709 static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port, 710 struct sk_buff *clone) 711 { 712 struct ocelot_port *ocelot_port = ocelot->ports[port]; 713 unsigned long flags; 714 715 spin_lock_irqsave(&ocelot->ts_id_lock, flags); 716 717 if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID || 718 ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) { 719 
spin_unlock_irqrestore(&ocelot->ts_id_lock, flags); 720 return -EBUSY; 721 } 722 723 skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS; 724 /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */ 725 OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id; 726 727 ocelot_port->ts_id++; 728 if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID) 729 ocelot_port->ts_id = 0; 730 731 ocelot_port->ptp_skbs_in_flight++; 732 ocelot->ptp_skbs_in_flight++; 733 734 skb_queue_tail(&ocelot_port->tx_skbs, clone); 735 736 spin_unlock_irqrestore(&ocelot->ts_id_lock, flags); 737 738 return 0; 739 } 740 741 static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb, 742 unsigned int ptp_class) 743 { 744 struct ptp_header *hdr; 745 u8 msgtype, twostep; 746 747 hdr = ptp_parse_header(skb, ptp_class); 748 if (!hdr) 749 return false; 750 751 msgtype = ptp_get_msgtype(hdr, ptp_class); 752 twostep = hdr->flag_field[0] & 0x2; 753 754 if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) 755 return true; 756 757 return false; 758 } 759 760 int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port, 761 struct sk_buff *skb, 762 struct sk_buff **clone) 763 { 764 struct ocelot_port *ocelot_port = ocelot->ports[port]; 765 u8 ptp_cmd = ocelot_port->ptp_cmd; 766 unsigned int ptp_class; 767 int err; 768 769 /* Don't do anything if PTP timestamping not enabled */ 770 if (!ptp_cmd) 771 return 0; 772 773 ptp_class = ptp_classify_raw(skb); 774 if (ptp_class == PTP_CLASS_NONE) 775 return -EINVAL; 776 777 /* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */ 778 if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) { 779 if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) { 780 OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd; 781 return 0; 782 } 783 784 /* Fall back to two-step timestamping */ 785 ptp_cmd = IFH_REW_OP_TWO_STEP_PTP; 786 } 787 788 if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { 789 *clone = skb_clone_sk(skb); 790 if (!(*clone)) 791 return -ENOMEM; 792 793 err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone); 794 if (err) 795 return err; 
796 797 OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd; 798 OCELOT_SKB_CB(*clone)->ptp_class = ptp_class; 799 } 800 801 return 0; 802 } 803 EXPORT_SYMBOL(ocelot_port_txtstamp_request); 804 805 static void ocelot_get_hwtimestamp(struct ocelot *ocelot, 806 struct timespec64 *ts) 807 { 808 unsigned long flags; 809 u32 val; 810 811 spin_lock_irqsave(&ocelot->ptp_clock_lock, flags); 812 813 /* Read current PTP time to get seconds */ 814 val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN); 815 816 val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM); 817 val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE); 818 ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN); 819 ts->tv_sec = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN); 820 821 /* Read packet HW timestamp from FIFO */ 822 val = ocelot_read(ocelot, SYS_PTP_TXSTAMP); 823 ts->tv_nsec = SYS_PTP_TXSTAMP_PTP_TXSTAMP(val); 824 825 /* Sec has incremented since the ts was registered */ 826 if ((ts->tv_sec & 0x1) != !!(val & SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC)) 827 ts->tv_sec--; 828 829 spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags); 830 } 831 832 static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid) 833 { 834 struct ptp_header *hdr; 835 836 hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class); 837 if (WARN_ON(!hdr)) 838 return false; 839 840 return seqid == ntohs(hdr->sequence_id); 841 } 842 843 void ocelot_get_txtstamp(struct ocelot *ocelot) 844 { 845 int budget = OCELOT_PTP_QUEUE_SZ; 846 847 while (budget--) { 848 struct sk_buff *skb, *skb_tmp, *skb_match = NULL; 849 struct skb_shared_hwtstamps shhwtstamps; 850 u32 val, id, seqid, txport; 851 struct ocelot_port *port; 852 struct timespec64 ts; 853 unsigned long flags; 854 855 val = ocelot_read(ocelot, SYS_PTP_STATUS); 856 857 /* Check if a timestamp can be retrieved */ 858 if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD)) 859 break; 860 861 WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL); 862 863 /* Retrieve the ts ID and Tx port */ 
864 id = SYS_PTP_STATUS_PTP_MESS_ID_X(val); 865 txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val); 866 seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val); 867 868 port = ocelot->ports[txport]; 869 870 spin_lock(&ocelot->ts_id_lock); 871 port->ptp_skbs_in_flight--; 872 ocelot->ptp_skbs_in_flight--; 873 spin_unlock(&ocelot->ts_id_lock); 874 875 /* Retrieve its associated skb */ 876 try_again: 877 spin_lock_irqsave(&port->tx_skbs.lock, flags); 878 879 skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { 880 if (OCELOT_SKB_CB(skb)->ts_id != id) 881 continue; 882 __skb_unlink(skb, &port->tx_skbs); 883 skb_match = skb; 884 break; 885 } 886 887 spin_unlock_irqrestore(&port->tx_skbs.lock, flags); 888 889 if (WARN_ON(!skb_match)) 890 continue; 891 892 if (!ocelot_validate_ptp_skb(skb_match, seqid)) { 893 dev_err_ratelimited(ocelot->dev, 894 "port %d received stale TX timestamp for seqid %d, discarding\n", 895 txport, seqid); 896 dev_kfree_skb_any(skb); 897 goto try_again; 898 } 899 900 /* Get the h/w timestamp */ 901 ocelot_get_hwtimestamp(ocelot, &ts); 902 903 /* Set the timestamp into the skb */ 904 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 905 shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); 906 skb_complete_tx_timestamp(skb_match, &shhwtstamps); 907 908 /* Next ts */ 909 ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT); 910 } 911 } 912 EXPORT_SYMBOL(ocelot_get_txtstamp); 913 914 static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh, 915 u32 *rval) 916 { 917 u32 bytes_valid, val; 918 919 val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); 920 if (val == XTR_NOT_READY) { 921 if (ifh) 922 return -EIO; 923 924 do { 925 val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); 926 } while (val == XTR_NOT_READY); 927 } 928 929 switch (val) { 930 case XTR_ABORT: 931 return -EIO; 932 case XTR_EOF_0: 933 case XTR_EOF_1: 934 case XTR_EOF_2: 935 case XTR_EOF_3: 936 case XTR_PRUNED: 937 bytes_valid = XTR_VALID_BYTES(val); 938 val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); 
939 if (val == XTR_ESCAPE) 940 *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp); 941 else 942 *rval = val; 943 944 return bytes_valid; 945 case XTR_ESCAPE: 946 *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp); 947 948 return 4; 949 default: 950 *rval = val; 951 952 return 4; 953 } 954 } 955 956 static int ocelot_xtr_poll_xfh(struct ocelot *ocelot, int grp, u32 *xfh) 957 { 958 int i, err = 0; 959 960 for (i = 0; i < OCELOT_TAG_LEN / 4; i++) { 961 err = ocelot_rx_frame_word(ocelot, grp, true, &xfh[i]); 962 if (err != 4) 963 return (err < 0) ? err : -EIO; 964 } 965 966 return 0; 967 } 968 969 int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb) 970 { 971 struct skb_shared_hwtstamps *shhwtstamps; 972 u64 tod_in_ns, full_ts_in_ns; 973 u64 timestamp, src_port, len; 974 u32 xfh[OCELOT_TAG_LEN / 4]; 975 struct net_device *dev; 976 struct timespec64 ts; 977 struct sk_buff *skb; 978 int sz, buf_len; 979 u32 val, *buf; 980 int err; 981 982 err = ocelot_xtr_poll_xfh(ocelot, grp, xfh); 983 if (err) 984 return err; 985 986 ocelot_xfh_get_src_port(xfh, &src_port); 987 ocelot_xfh_get_len(xfh, &len); 988 ocelot_xfh_get_rew_val(xfh, ×tamp); 989 990 if (WARN_ON(src_port >= ocelot->num_phys_ports)) 991 return -EINVAL; 992 993 dev = ocelot->ops->port_to_netdev(ocelot, src_port); 994 if (!dev) 995 return -EINVAL; 996 997 skb = netdev_alloc_skb(dev, len); 998 if (unlikely(!skb)) { 999 netdev_err(dev, "Unable to allocate sk_buff\n"); 1000 return -ENOMEM; 1001 } 1002 1003 buf_len = len - ETH_FCS_LEN; 1004 buf = (u32 *)skb_put(skb, buf_len); 1005 1006 len = 0; 1007 do { 1008 sz = ocelot_rx_frame_word(ocelot, grp, false, &val); 1009 if (sz < 0) { 1010 err = sz; 1011 goto out_free_skb; 1012 } 1013 *buf++ = val; 1014 len += sz; 1015 } while (len < buf_len); 1016 1017 /* Read the FCS */ 1018 sz = ocelot_rx_frame_word(ocelot, grp, false, &val); 1019 if (sz < 0) { 1020 err = sz; 1021 goto out_free_skb; 1022 } 1023 1024 /* Update the statistics if part of the FCS was read 
before */ 1025 len -= ETH_FCS_LEN - sz; 1026 1027 if (unlikely(dev->features & NETIF_F_RXFCS)) { 1028 buf = (u32 *)skb_put(skb, ETH_FCS_LEN); 1029 *buf = val; 1030 } 1031 1032 if (ocelot->ptp) { 1033 ocelot_ptp_gettime64(&ocelot->ptp_info, &ts); 1034 1035 tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec); 1036 if ((tod_in_ns & 0xffffffff) < timestamp) 1037 full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) | 1038 timestamp; 1039 else 1040 full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) | 1041 timestamp; 1042 1043 shhwtstamps = skb_hwtstamps(skb); 1044 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); 1045 shhwtstamps->hwtstamp = full_ts_in_ns; 1046 } 1047 1048 /* Everything we see on an interface that is in the HW bridge 1049 * has already been forwarded. 1050 */ 1051 if (ocelot->ports[src_port]->bridge) 1052 skb->offload_fwd_mark = 1; 1053 1054 skb->protocol = eth_type_trans(skb, dev); 1055 1056 *nskb = skb; 1057 1058 return 0; 1059 1060 out_free_skb: 1061 kfree_skb(skb); 1062 return err; 1063 } 1064 EXPORT_SYMBOL(ocelot_xtr_poll_frame); 1065 1066 bool ocelot_can_inject(struct ocelot *ocelot, int grp) 1067 { 1068 u32 val = ocelot_read(ocelot, QS_INJ_STATUS); 1069 1070 if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp)))) 1071 return false; 1072 if (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp))) 1073 return false; 1074 1075 return true; 1076 } 1077 EXPORT_SYMBOL(ocelot_can_inject); 1078 1079 void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp, 1080 u32 rew_op, struct sk_buff *skb) 1081 { 1082 u32 ifh[OCELOT_TAG_LEN / 4] = {0}; 1083 unsigned int i, count, last; 1084 1085 ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | 1086 QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp); 1087 1088 ocelot_ifh_set_bypass(ifh, 1); 1089 ocelot_ifh_set_dest(ifh, BIT_ULL(port)); 1090 ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C); 1091 ocelot_ifh_set_vlan_tci(ifh, skb_vlan_tag_get(skb)); 1092 ocelot_ifh_set_rew_op(ifh, rew_op); 1093 1094 for (i = 0; i < OCELOT_TAG_LEN / 4; i++) 1095 
ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp); 1096 1097 count = DIV_ROUND_UP(skb->len, 4); 1098 last = skb->len % 4; 1099 for (i = 0; i < count; i++) 1100 ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp); 1101 1102 /* Add padding */ 1103 while (i < (OCELOT_BUFFER_CELL_SZ / 4)) { 1104 ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp); 1105 i++; 1106 } 1107 1108 /* Indicate EOF and valid bytes in last word */ 1109 ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | 1110 QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) | 1111 QS_INJ_CTRL_EOF, 1112 QS_INJ_CTRL, grp); 1113 1114 /* Add dummy CRC */ 1115 ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp); 1116 skb_tx_timestamp(skb); 1117 1118 skb->dev->stats.tx_packets++; 1119 skb->dev->stats.tx_bytes += skb->len; 1120 } 1121 EXPORT_SYMBOL(ocelot_port_inject_frame); 1122 1123 void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp) 1124 { 1125 while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) 1126 ocelot_read_rix(ocelot, QS_XTR_RD, grp); 1127 } 1128 EXPORT_SYMBOL(ocelot_drain_cpu_queue); 1129 1130 int ocelot_fdb_add(struct ocelot *ocelot, int port, 1131 const unsigned char *addr, u16 vid) 1132 { 1133 int pgid = port; 1134 1135 if (port == ocelot->npi) 1136 pgid = PGID_CPU; 1137 1138 return ocelot_mact_learn(ocelot, pgid, addr, vid, ENTRYTYPE_LOCKED); 1139 } 1140 EXPORT_SYMBOL(ocelot_fdb_add); 1141 1142 int ocelot_fdb_del(struct ocelot *ocelot, int port, 1143 const unsigned char *addr, u16 vid) 1144 { 1145 return ocelot_mact_forget(ocelot, addr, vid); 1146 } 1147 EXPORT_SYMBOL(ocelot_fdb_del); 1148 1149 int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid, 1150 bool is_static, void *data) 1151 { 1152 struct ocelot_dump_ctx *dump = data; 1153 u32 portid = NETLINK_CB(dump->cb->skb).portid; 1154 u32 seq = dump->cb->nlh->nlmsg_seq; 1155 struct nlmsghdr *nlh; 1156 struct ndmsg *ndm; 1157 1158 if (dump->idx < dump->cb->args[2]) 1159 goto skip; 1160 1161 nlh = nlmsg_put(dump->skb, 
			portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	/* VID 0 means untagged; omit the VLAN attribute in that case */
	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(ocelot_port_fdb_do_dump);

/* Read the MAC table entry at @row/@col into @entry, but only if it is
 * valid and destined to @port. Returns -EINVAL for entries that should
 * simply be skipped, -ETIMEDOUT if the hardware did not complete the read.
 * Caller must hold &ocelot->mact_lock.
 */
static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
			    struct ocelot_mact_entry *entry)
{
	u32 val, dst, macl, mach;
	char mac[ETH_ALEN];

	/* Set row and column to read from */
	ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_M_INDEX, row);
	ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_BUCKET, col);

	/* Issue a read command */
	ocelot_write(ocelot,
		     ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ),
		     ANA_TABLES_MACACCESS);

	if (ocelot_mact_wait_for_completion(ocelot))
		return -ETIMEDOUT;

	/* Read the entry flags */
	val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
	if (!(val & ANA_TABLES_MACACCESS_VALID))
		return -EINVAL;

	/* If the entry read has another port configured as its destination,
	 * do not report it.
	 */
	dst = (val & ANA_TABLES_MACACCESS_DEST_IDX_M) >> 3;
	if (dst != port)
		return -EINVAL;

	/* Get the entry's MAC address and VLAN id */
	macl = ocelot_read(ocelot, ANA_TABLES_MACLDATA);
	mach = ocelot_read(ocelot, ANA_TABLES_MACHDATA);

	/* Unpack the address from the HDATA/LDATA register layout (same
	 * layout ocelot_mact_select() uses on write).
	 */
	mac[0] = (mach >> 8) & 0xff;
	mac[1] = (mach >> 0) & 0xff;
	mac[2] = (macl >> 24) & 0xff;
	mac[3] = (macl >> 16) & 0xff;
	mac[4] = (macl >> 8) & 0xff;
	mac[5] = (macl >> 0) & 0xff;

	entry->vid = (mach >> 16) & 0xfff;
	ether_addr_copy(entry->mac, mac);

	return 0;
}

/* Walk every row and bucket of the MAC table and invoke @cb for each valid
 * entry destined to @port.
 */
int ocelot_fdb_dump(struct ocelot *ocelot, int port,
		    dsa_fdb_dump_cb_t *cb, void *data)
{
	int err = 0;
	int i, j;

	/* We could take the lock just around ocelot_mact_read, but doing so
	 * thousands of times in a row seems rather pointless and inefficient.
	 */
	mutex_lock(&ocelot->mact_lock);

	/* Loop through all the mac tables entries. */
	for (i = 0; i < ocelot->num_mact_rows; i++) {
		for (j = 0; j < 4; j++) {
			struct ocelot_mact_entry entry;
			bool is_static;

			err = ocelot_mact_read(ocelot, port, i, j, &entry);
			/* If the entry is invalid (wrong port, invalid...),
			 * skip it.
1261 */ 1262 if (err == -EINVAL) 1263 continue; 1264 else if (err) 1265 break; 1266 1267 is_static = (entry.type == ENTRYTYPE_LOCKED); 1268 1269 err = cb(entry.mac, entry.vid, is_static, data); 1270 if (err) 1271 break; 1272 } 1273 } 1274 1275 mutex_unlock(&ocelot->mact_lock); 1276 1277 return err; 1278 } 1279 EXPORT_SYMBOL(ocelot_fdb_dump); 1280 1281 static void ocelot_populate_l2_ptp_trap_key(struct ocelot_vcap_filter *trap) 1282 { 1283 trap->key_type = OCELOT_VCAP_KEY_ETYPE; 1284 *(__be16 *)trap->key.etype.etype.value = htons(ETH_P_1588); 1285 *(__be16 *)trap->key.etype.etype.mask = htons(0xffff); 1286 } 1287 1288 static void 1289 ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap) 1290 { 1291 trap->key_type = OCELOT_VCAP_KEY_IPV4; 1292 trap->key.ipv4.dport.value = PTP_EV_PORT; 1293 trap->key.ipv4.dport.mask = 0xffff; 1294 } 1295 1296 static void 1297 ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap) 1298 { 1299 trap->key_type = OCELOT_VCAP_KEY_IPV6; 1300 trap->key.ipv6.dport.value = PTP_EV_PORT; 1301 trap->key.ipv6.dport.mask = 0xffff; 1302 } 1303 1304 static void 1305 ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap) 1306 { 1307 trap->key_type = OCELOT_VCAP_KEY_IPV4; 1308 trap->key.ipv4.dport.value = PTP_GEN_PORT; 1309 trap->key.ipv4.dport.mask = 0xffff; 1310 } 1311 1312 static void 1313 ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap) 1314 { 1315 trap->key_type = OCELOT_VCAP_KEY_IPV6; 1316 trap->key.ipv6.dport.value = PTP_GEN_PORT; 1317 trap->key.ipv6.dport.mask = 0xffff; 1318 } 1319 1320 static int ocelot_trap_add(struct ocelot *ocelot, int port, 1321 unsigned long cookie, 1322 void (*populate)(struct ocelot_vcap_filter *f)) 1323 { 1324 struct ocelot_vcap_block *block_vcap_is2; 1325 struct ocelot_vcap_filter *trap; 1326 bool new = false; 1327 int err; 1328 1329 block_vcap_is2 = &ocelot->block[VCAP_IS2]; 1330 1331 trap = 
ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie, 1332 false); 1333 if (!trap) { 1334 trap = kzalloc(sizeof(*trap), GFP_KERNEL); 1335 if (!trap) 1336 return -ENOMEM; 1337 1338 populate(trap); 1339 trap->prio = 1; 1340 trap->id.cookie = cookie; 1341 trap->id.tc_offload = false; 1342 trap->block_id = VCAP_IS2; 1343 trap->type = OCELOT_VCAP_FILTER_OFFLOAD; 1344 trap->lookup = 0; 1345 trap->action.cpu_copy_ena = true; 1346 trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; 1347 trap->action.port_mask = 0; 1348 new = true; 1349 } 1350 1351 trap->ingress_port_mask |= BIT(port); 1352 1353 if (new) 1354 err = ocelot_vcap_filter_add(ocelot, trap, NULL); 1355 else 1356 err = ocelot_vcap_filter_replace(ocelot, trap); 1357 if (err) { 1358 trap->ingress_port_mask &= ~BIT(port); 1359 if (!trap->ingress_port_mask) 1360 kfree(trap); 1361 return err; 1362 } 1363 1364 return 0; 1365 } 1366 1367 static int ocelot_trap_del(struct ocelot *ocelot, int port, 1368 unsigned long cookie) 1369 { 1370 struct ocelot_vcap_block *block_vcap_is2; 1371 struct ocelot_vcap_filter *trap; 1372 1373 block_vcap_is2 = &ocelot->block[VCAP_IS2]; 1374 1375 trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie, 1376 false); 1377 if (!trap) 1378 return 0; 1379 1380 trap->ingress_port_mask &= ~BIT(port); 1381 if (!trap->ingress_port_mask) 1382 return ocelot_vcap_filter_del(ocelot, trap); 1383 1384 return ocelot_vcap_filter_replace(ocelot, trap); 1385 } 1386 1387 static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port) 1388 { 1389 unsigned long l2_cookie = ocelot->num_phys_ports + 1; 1390 1391 return ocelot_trap_add(ocelot, port, l2_cookie, 1392 ocelot_populate_l2_ptp_trap_key); 1393 } 1394 1395 static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port) 1396 { 1397 unsigned long l2_cookie = ocelot->num_phys_ports + 1; 1398 1399 return ocelot_trap_del(ocelot, port, l2_cookie); 1400 } 1401 1402 static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port) 1403 { 
1404 unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2; 1405 unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3; 1406 int err; 1407 1408 err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, 1409 ocelot_populate_ipv4_ptp_event_trap_key); 1410 if (err) 1411 return err; 1412 1413 err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, 1414 ocelot_populate_ipv4_ptp_general_trap_key); 1415 if (err) 1416 ocelot_trap_del(ocelot, port, ipv4_ev_cookie); 1417 1418 return err; 1419 } 1420 1421 static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port) 1422 { 1423 unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2; 1424 unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3; 1425 int err; 1426 1427 err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie); 1428 err |= ocelot_trap_del(ocelot, port, ipv4_gen_cookie); 1429 return err; 1430 } 1431 1432 static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port) 1433 { 1434 unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4; 1435 unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5; 1436 int err; 1437 1438 err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie, 1439 ocelot_populate_ipv6_ptp_event_trap_key); 1440 if (err) 1441 return err; 1442 1443 err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, 1444 ocelot_populate_ipv6_ptp_general_trap_key); 1445 if (err) 1446 ocelot_trap_del(ocelot, port, ipv6_ev_cookie); 1447 1448 return err; 1449 } 1450 1451 static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port) 1452 { 1453 unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4; 1454 unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5; 1455 int err; 1456 1457 err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie); 1458 err |= ocelot_trap_del(ocelot, port, ipv6_gen_cookie); 1459 return err; 1460 } 1461 1462 static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port, 1463 bool l2, bool l4) 1464 { 1465 int err; 1466 1467 if (l2) 1468 err = ocelot_l2_ptp_trap_add(ocelot, 
					     port);
	else
		err = ocelot_l2_ptp_trap_del(ocelot, port);
	if (err)
		return err;

	if (l4) {
		err = ocelot_ipv4_ptp_trap_add(ocelot, port);
		if (err)
			goto err_ipv4;

		err = ocelot_ipv6_ptp_trap_add(ocelot, port);
		if (err)
			goto err_ipv6;
	} else {
		err = ocelot_ipv4_ptp_trap_del(ocelot, port);

		err |= ocelot_ipv6_ptp_trap_del(ocelot, port);
	}
	if (err)
		return err;

	return 0;

err_ipv6:
	ocelot_ipv4_ptp_trap_del(ocelot, port);
err_ipv4:
	if (l2)
		ocelot_l2_ptp_trap_del(ocelot, port);
	return err;
}

/* SIOCGHWTSTAMP handler: report the cached hw timestamping config */
int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
{
	return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
			    sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(ocelot_hwstamp_get);

/* SIOCSHWTSTAMP handler: validate the requested TX/RX timestamping modes,
 * install or remove the PTP trap rules accordingly, and report back the
 * (possibly widened) RX filter actually applied.
 */
int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	bool l2 = false, l4 = false;
	struct hwtstamp_config cfg;
	int err;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	/* reserved for future extensions */
	if (cfg.flags)
		return -EINVAL;

	/* Tx type sanity check */
	switch (cfg.tx_type) {
	case HWTSTAMP_TX_ON:
		ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
		break;
	case HWTSTAMP_TX_ONESTEP_SYNC:
		/* IFH_REW_OP_ONE_STEP_PTP updates the correctional field, we
		 * need to update the origin time.
		 */
		ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
		break;
	case HWTSTAMP_TX_OFF:
		ocelot_port->ptp_cmd = 0;
		break;
	default:
		return -ERANGE;
	}

	mutex_lock(&ocelot->ptp_lock);

	/* Map the requested RX filter onto the L2/L4 trap dimensions */
	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		l2 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		l2 = true;
		l4 = true;
		break;
	default:
		mutex_unlock(&ocelot->ptp_lock);
		return -ERANGE;
	}

	err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
	if (err) {
		mutex_unlock(&ocelot->ptp_lock);
		return err;
	}

	/* The hardware cannot filter more finely than per-transport, so
	 * widen the requested filter to what is actually provided.
	 */
	if (l2 && l4)
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
	else if (l2)
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
	else if (l4)
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
	else
		cfg.rx_filter = HWTSTAMP_FILTER_NONE;

	/* Commit back the result & save it */
	memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
	mutex_unlock(&ocelot->ptp_lock);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ?
-EFAULT : 0; 1585 } 1586 EXPORT_SYMBOL(ocelot_hwstamp_set); 1587 1588 void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data) 1589 { 1590 int i; 1591 1592 if (sset != ETH_SS_STATS) 1593 return; 1594 1595 for (i = 0; i < ocelot->num_stats; i++) 1596 memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name, 1597 ETH_GSTRING_LEN); 1598 } 1599 EXPORT_SYMBOL(ocelot_get_strings); 1600 1601 static void ocelot_update_stats(struct ocelot *ocelot) 1602 { 1603 int i, j; 1604 1605 mutex_lock(&ocelot->stats_lock); 1606 1607 for (i = 0; i < ocelot->num_phys_ports; i++) { 1608 /* Configure the port to read the stats from */ 1609 ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG); 1610 1611 for (j = 0; j < ocelot->num_stats; j++) { 1612 u32 val; 1613 unsigned int idx = i * ocelot->num_stats + j; 1614 1615 val = ocelot_read_rix(ocelot, SYS_COUNT_RX_OCTETS, 1616 ocelot->stats_layout[j].offset); 1617 1618 if (val < (ocelot->stats[idx] & U32_MAX)) 1619 ocelot->stats[idx] += (u64)1 << 32; 1620 1621 ocelot->stats[idx] = (ocelot->stats[idx] & 1622 ~(u64)U32_MAX) + val; 1623 } 1624 } 1625 1626 mutex_unlock(&ocelot->stats_lock); 1627 } 1628 1629 static void ocelot_check_stats_work(struct work_struct *work) 1630 { 1631 struct delayed_work *del_work = to_delayed_work(work); 1632 struct ocelot *ocelot = container_of(del_work, struct ocelot, 1633 stats_work); 1634 1635 ocelot_update_stats(ocelot); 1636 1637 queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, 1638 OCELOT_STATS_CHECK_DELAY); 1639 } 1640 1641 void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data) 1642 { 1643 int i; 1644 1645 /* check and update now */ 1646 ocelot_update_stats(ocelot); 1647 1648 /* Copy all counters */ 1649 for (i = 0; i < ocelot->num_stats; i++) 1650 *data++ = ocelot->stats[port * ocelot->num_stats + i]; 1651 } 1652 EXPORT_SYMBOL(ocelot_get_ethtool_stats); 1653 1654 int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset) 1655 { 1656 
if (sset != ETH_SS_STATS) 1657 return -EOPNOTSUPP; 1658 1659 return ocelot->num_stats; 1660 } 1661 EXPORT_SYMBOL(ocelot_get_sset_count); 1662 1663 int ocelot_get_ts_info(struct ocelot *ocelot, int port, 1664 struct ethtool_ts_info *info) 1665 { 1666 info->phc_index = ocelot->ptp_clock ? 1667 ptp_clock_index(ocelot->ptp_clock) : -1; 1668 if (info->phc_index == -1) { 1669 info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | 1670 SOF_TIMESTAMPING_RX_SOFTWARE | 1671 SOF_TIMESTAMPING_SOFTWARE; 1672 return 0; 1673 } 1674 info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | 1675 SOF_TIMESTAMPING_RX_SOFTWARE | 1676 SOF_TIMESTAMPING_SOFTWARE | 1677 SOF_TIMESTAMPING_TX_HARDWARE | 1678 SOF_TIMESTAMPING_RX_HARDWARE | 1679 SOF_TIMESTAMPING_RAW_HARDWARE; 1680 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) | 1681 BIT(HWTSTAMP_TX_ONESTEP_SYNC); 1682 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | 1683 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | 1684 BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 1685 BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 1686 1687 return 0; 1688 } 1689 EXPORT_SYMBOL(ocelot_get_ts_info); 1690 1691 static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond, 1692 bool only_active_ports) 1693 { 1694 u32 mask = 0; 1695 int port; 1696 1697 for (port = 0; port < ocelot->num_phys_ports; port++) { 1698 struct ocelot_port *ocelot_port = ocelot->ports[port]; 1699 1700 if (!ocelot_port) 1701 continue; 1702 1703 if (ocelot_port->bond == bond) { 1704 if (only_active_ports && !ocelot_port->lag_tx_active) 1705 continue; 1706 1707 mask |= BIT(port); 1708 } 1709 } 1710 1711 return mask; 1712 } 1713 1714 static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port, 1715 struct net_device *bridge) 1716 { 1717 struct ocelot_port *ocelot_port = ocelot->ports[src_port]; 1718 u32 mask = 0; 1719 int port; 1720 1721 if (!ocelot_port || ocelot_port->bridge != bridge || 1722 ocelot_port->stp_state != BR_STATE_FORWARDING) 1723 return 0; 1724 1725 for (port = 
0; port < ocelot->num_phys_ports; port++) {
		ocelot_port = ocelot->ports[port];

		if (!ocelot_port)
			continue;

		if (ocelot_port->stp_state == BR_STATE_FORWARDING &&
		    ocelot_port->bridge == bridge)
			mask |= BIT(port);
	}

	return mask;
}

/* Return the mask of ports acting as a DSA tag_8021q CPU port */
static u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot)
{
	u32 mask = 0;
	int port;

	for (port = 0; port < ocelot->num_phys_ports; port++) {
		struct ocelot_port *ocelot_port = ocelot->ports[port];

		if (!ocelot_port)
			continue;

		if (ocelot_port->is_dsa_8021q_cpu)
			mask |= BIT(port);
	}

	return mask;
}

/* Recompute and program the per-port source forwarding masks (PGID_SRC),
 * taking bridging, bonding, STP state and tag_8021q CPU ports into account.
 */
void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
{
	unsigned long cpu_fwd_mask;
	int port;

	/* If a DSA tag_8021q CPU exists, it needs to be included in the
	 * regular forwarding path of the front ports regardless of whether
	 * those are bridged or standalone.
	 * If DSA tag_8021q is not used, this returns 0, which is fine because
	 * the hardware-based CPU port module can be a destination for packets
	 * even if it isn't part of PGID_SRC.
	 */
	cpu_fwd_mask = ocelot_get_dsa_8021q_cpu_mask(ocelot);

	/* Apply FWD mask. The loop is needed to add/remove the current port as
	 * a source for the other ports.
	 */
	for (port = 0; port < ocelot->num_phys_ports; port++) {
		struct ocelot_port *ocelot_port = ocelot->ports[port];
		unsigned long mask;

		if (!ocelot_port) {
			/* Unused ports can't send anywhere */
			mask = 0;
		} else if (ocelot_port->is_dsa_8021q_cpu) {
			/* The DSA tag_8021q CPU ports need to be able to
			 * forward packets to all other ports except for
			 * themselves
			 */
			mask = GENMASK(ocelot->num_phys_ports - 1, 0);
			mask &= ~cpu_fwd_mask;
		} else if (ocelot_port->bridge) {
			struct net_device *bridge = ocelot_port->bridge;
			struct net_device *bond = ocelot_port->bond;

			mask = ocelot_get_bridge_fwd_mask(ocelot, port, bridge);
			mask |= cpu_fwd_mask;
			mask &= ~BIT(port);
			if (bond) {
				/* Never forward towards other members of the
				 * same LAG
				 */
				mask &= ~ocelot_get_bond_mask(ocelot, bond,
							      false);
			}
		} else {
			/* Standalone ports forward only to DSA tag_8021q CPU
			 * ports (if those exist), or to the hardware CPU port
			 * module otherwise.
			 */
			mask = cpu_fwd_mask;
		}

		ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port);
	}
}
EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask);

/* switchdev STP state change: cache the state, gate address learning on it,
 * and recompute the forwarding masks.
 */
void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	u32 learn_ena = 0;

	ocelot_port->stp_state = state;

	/* Learning stays enabled only if both the STP state permits it and
	 * the bridge has learning enabled on this port.
	 */
	if ((state == BR_STATE_LEARNING || state == BR_STATE_FORWARDING) &&
	    ocelot_port->learn_ena)
		learn_ena = ANA_PORT_PORT_CFG_LEARN_ENA;

	ocelot_rmw_gix(ocelot, learn_ena, ANA_PORT_PORT_CFG_LEARN_ENA,
		       ANA_PORT_PORT_CFG, port);

	ocelot_apply_bridge_fwd_mask(ocelot);
}
EXPORT_SYMBOL(ocelot_bridge_stp_state_set);

/* Program the FDB aging period; @msecs is halved because the hardware ages
 * entries in two passes.
 */
void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs)
{
	unsigned int age_period = ANA_AUTOAGE_AGE_PERIOD(msecs / 2000);

	/* Setting AGE_PERIOD to zero effectively disables automatic aging,
	 * which is clearly not what our intention is. So avoid that.
1836 */ 1837 if (!age_period) 1838 age_period = 1; 1839 1840 ocelot_rmw(ocelot, age_period, ANA_AUTOAGE_AGE_PERIOD_M, ANA_AUTOAGE); 1841 } 1842 EXPORT_SYMBOL(ocelot_set_ageing_time); 1843 1844 static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot, 1845 const unsigned char *addr, 1846 u16 vid) 1847 { 1848 struct ocelot_multicast *mc; 1849 1850 list_for_each_entry(mc, &ocelot->multicast, list) { 1851 if (ether_addr_equal(mc->addr, addr) && mc->vid == vid) 1852 return mc; 1853 } 1854 1855 return NULL; 1856 } 1857 1858 static enum macaccess_entry_type ocelot_classify_mdb(const unsigned char *addr) 1859 { 1860 if (addr[0] == 0x01 && addr[1] == 0x00 && addr[2] == 0x5e) 1861 return ENTRYTYPE_MACv4; 1862 if (addr[0] == 0x33 && addr[1] == 0x33) 1863 return ENTRYTYPE_MACv6; 1864 return ENTRYTYPE_LOCKED; 1865 } 1866 1867 static struct ocelot_pgid *ocelot_pgid_alloc(struct ocelot *ocelot, int index, 1868 unsigned long ports) 1869 { 1870 struct ocelot_pgid *pgid; 1871 1872 pgid = kzalloc(sizeof(*pgid), GFP_KERNEL); 1873 if (!pgid) 1874 return ERR_PTR(-ENOMEM); 1875 1876 pgid->ports = ports; 1877 pgid->index = index; 1878 refcount_set(&pgid->refcount, 1); 1879 list_add_tail(&pgid->list, &ocelot->pgids); 1880 1881 return pgid; 1882 } 1883 1884 static void ocelot_pgid_free(struct ocelot *ocelot, struct ocelot_pgid *pgid) 1885 { 1886 if (!refcount_dec_and_test(&pgid->refcount)) 1887 return; 1888 1889 list_del(&pgid->list); 1890 kfree(pgid); 1891 } 1892 1893 static struct ocelot_pgid *ocelot_mdb_get_pgid(struct ocelot *ocelot, 1894 const struct ocelot_multicast *mc) 1895 { 1896 struct ocelot_pgid *pgid; 1897 int index; 1898 1899 /* According to VSC7514 datasheet 3.9.1.5 IPv4 Multicast Entries and 1900 * 3.9.1.6 IPv6 Multicast Entries, "Instead of a lookup in the 1901 * destination mask table (PGID), the destination set is programmed as 1902 * part of the entry MAC address.", and the DEST_IDX is set to 0. 
1903 */ 1904 if (mc->entry_type == ENTRYTYPE_MACv4 || 1905 mc->entry_type == ENTRYTYPE_MACv6) 1906 return ocelot_pgid_alloc(ocelot, 0, mc->ports); 1907 1908 list_for_each_entry(pgid, &ocelot->pgids, list) { 1909 /* When searching for a nonreserved multicast PGID, ignore the 1910 * dummy PGID of zero that we have for MACv4/MACv6 entries 1911 */ 1912 if (pgid->index && pgid->ports == mc->ports) { 1913 refcount_inc(&pgid->refcount); 1914 return pgid; 1915 } 1916 } 1917 1918 /* Search for a free index in the nonreserved multicast PGID area */ 1919 for_each_nonreserved_multicast_dest_pgid(ocelot, index) { 1920 bool used = false; 1921 1922 list_for_each_entry(pgid, &ocelot->pgids, list) { 1923 if (pgid->index == index) { 1924 used = true; 1925 break; 1926 } 1927 } 1928 1929 if (!used) 1930 return ocelot_pgid_alloc(ocelot, index, mc->ports); 1931 } 1932 1933 return ERR_PTR(-ENOSPC); 1934 } 1935 1936 static void ocelot_encode_ports_to_mdb(unsigned char *addr, 1937 struct ocelot_multicast *mc) 1938 { 1939 ether_addr_copy(addr, mc->addr); 1940 1941 if (mc->entry_type == ENTRYTYPE_MACv4) { 1942 addr[0] = 0; 1943 addr[1] = mc->ports >> 8; 1944 addr[2] = mc->ports & 0xff; 1945 } else if (mc->entry_type == ENTRYTYPE_MACv6) { 1946 addr[0] = mc->ports >> 8; 1947 addr[1] = mc->ports & 0xff; 1948 } 1949 } 1950 1951 int ocelot_port_mdb_add(struct ocelot *ocelot, int port, 1952 const struct switchdev_obj_port_mdb *mdb) 1953 { 1954 unsigned char addr[ETH_ALEN]; 1955 struct ocelot_multicast *mc; 1956 struct ocelot_pgid *pgid; 1957 u16 vid = mdb->vid; 1958 1959 if (port == ocelot->npi) 1960 port = ocelot->num_phys_ports; 1961 1962 mc = ocelot_multicast_get(ocelot, mdb->addr, vid); 1963 if (!mc) { 1964 /* New entry */ 1965 mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL); 1966 if (!mc) 1967 return -ENOMEM; 1968 1969 mc->entry_type = ocelot_classify_mdb(mdb->addr); 1970 ether_addr_copy(mc->addr, mdb->addr); 1971 mc->vid = vid; 1972 1973 list_add_tail(&mc->list, &ocelot->multicast); 
1974 } else { 1975 /* Existing entry. Clean up the current port mask from 1976 * hardware now, because we'll be modifying it. 1977 */ 1978 ocelot_pgid_free(ocelot, mc->pgid); 1979 ocelot_encode_ports_to_mdb(addr, mc); 1980 ocelot_mact_forget(ocelot, addr, vid); 1981 } 1982 1983 mc->ports |= BIT(port); 1984 1985 pgid = ocelot_mdb_get_pgid(ocelot, mc); 1986 if (IS_ERR(pgid)) { 1987 dev_err(ocelot->dev, 1988 "Cannot allocate PGID for mdb %pM vid %d\n", 1989 mc->addr, mc->vid); 1990 devm_kfree(ocelot->dev, mc); 1991 return PTR_ERR(pgid); 1992 } 1993 mc->pgid = pgid; 1994 1995 ocelot_encode_ports_to_mdb(addr, mc); 1996 1997 if (mc->entry_type != ENTRYTYPE_MACv4 && 1998 mc->entry_type != ENTRYTYPE_MACv6) 1999 ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID, 2000 pgid->index); 2001 2002 return ocelot_mact_learn(ocelot, pgid->index, addr, vid, 2003 mc->entry_type); 2004 } 2005 EXPORT_SYMBOL(ocelot_port_mdb_add); 2006 2007 int ocelot_port_mdb_del(struct ocelot *ocelot, int port, 2008 const struct switchdev_obj_port_mdb *mdb) 2009 { 2010 unsigned char addr[ETH_ALEN]; 2011 struct ocelot_multicast *mc; 2012 struct ocelot_pgid *pgid; 2013 u16 vid = mdb->vid; 2014 2015 if (port == ocelot->npi) 2016 port = ocelot->num_phys_ports; 2017 2018 mc = ocelot_multicast_get(ocelot, mdb->addr, vid); 2019 if (!mc) 2020 return -ENOENT; 2021 2022 ocelot_encode_ports_to_mdb(addr, mc); 2023 ocelot_mact_forget(ocelot, addr, vid); 2024 2025 ocelot_pgid_free(ocelot, mc->pgid); 2026 mc->ports &= ~BIT(port); 2027 if (!mc->ports) { 2028 list_del(&mc->list); 2029 devm_kfree(ocelot->dev, mc); 2030 return 0; 2031 } 2032 2033 /* We have a PGID with fewer ports now */ 2034 pgid = ocelot_mdb_get_pgid(ocelot, mc); 2035 if (IS_ERR(pgid)) 2036 return PTR_ERR(pgid); 2037 mc->pgid = pgid; 2038 2039 ocelot_encode_ports_to_mdb(addr, mc); 2040 2041 if (mc->entry_type != ENTRYTYPE_MACv4 && 2042 mc->entry_type != ENTRYTYPE_MACv6) 2043 ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID, 2044 pgid->index); 2045 
	return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
				 mc->entry_type);
}
EXPORT_SYMBOL(ocelot_port_mdb_del);

/* Attach @port to @bridge and recompute the forwarding masks */
void ocelot_port_bridge_join(struct ocelot *ocelot, int port,
			     struct net_device *bridge)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];

	ocelot_port->bridge = bridge;

	ocelot_apply_bridge_fwd_mask(ocelot);
}
EXPORT_SYMBOL(ocelot_port_bridge_join);

/* Detach @port from @bridge: restore standalone VLAN/tagging behavior and
 * recompute the forwarding masks.
 */
void ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
			      struct net_device *bridge)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];

	ocelot_port->bridge = NULL;

	ocelot_port_set_pvid(ocelot, port, NULL);
	ocelot_port_manage_port_tag(ocelot, port);
	ocelot_apply_bridge_fwd_mask(ocelot);
}
EXPORT_SYMBOL(ocelot_port_bridge_leave);

/* Program the unicast destination PGIDs and the aggregation PGIDs so that
 * traffic is load-balanced across the active members of each offloaded LAG.
 */
static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
{
	unsigned long visited = GENMASK(ocelot->num_phys_ports - 1, 0);
	int i, port, lag;

	/* Reset destination and aggregation PGIDS */
	for_each_unicast_dest_pgid(ocelot, port)
		ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);

	for_each_aggr_pgid(ocelot, i)
		ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
				 ANA_PGID_PGID, i);

	/* The visited ports bitmask holds the list of ports offloading any
	 * bonding interface. Initially we mark all these ports as unvisited,
	 * then every time we visit a port in this bitmask, we know that it is
	 * the lowest numbered port, i.e. the one whose logical ID == physical
	 * port ID == LAG ID. So we mark as visited all further ports in the
	 * bitmask that are offloading the same bonding interface. This way,
	 * we set up the aggregation PGIDs only once per bonding interface.
	 */
	for (port = 0; port < ocelot->num_phys_ports; port++) {
		struct ocelot_port *ocelot_port = ocelot->ports[port];

		if (!ocelot_port || !ocelot_port->bond)
			continue;

		visited &= ~BIT(port);
	}

	/* Now, set PGIDs for each active LAG */
	for (lag = 0; lag < ocelot->num_phys_ports; lag++) {
		/* NOTE(review): ocelot->ports[lag] is dereferenced without a
		 * NULL check here, unlike the other loops in this function —
		 * confirm every index below num_phys_ports is populated.
		 */
		struct net_device *bond = ocelot->ports[lag]->bond;
		int num_active_ports = 0;
		unsigned long bond_mask;
		u8 aggr_idx[16];

		if (!bond || (visited & BIT(lag)))
			continue;

		bond_mask = ocelot_get_bond_mask(ocelot, bond, true);

		for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) {
			// Destination mask
			ocelot_write_rix(ocelot, bond_mask,
					 ANA_PGID_PGID, port);
			aggr_idx[num_active_ports++] = port;
		}

		for_each_aggr_pgid(ocelot, i) {
			u32 ac;

			ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i);
			ac &= ~bond_mask;
			/* Don't do division by zero if there was no active
			 * port. Just make all aggregation codes zero.
			 */
			if (num_active_ports)
				ac |= BIT(aggr_idx[i % num_active_ports]);
			ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i);
		}

		/* Mark all ports in the same LAG as visited to avoid applying
		 * the same config again.
		 */
		for (port = lag; port < ocelot->num_phys_ports; port++) {
			struct ocelot_port *ocelot_port = ocelot->ports[port];

			if (!ocelot_port)
				continue;

			if (ocelot_port->bond == bond)
				visited |= BIT(port);
		}
	}
}

/* When offloading a bonding interface, the switch ports configured under the
 * same bond must have the same logical port ID, equal to the physical port ID
 * of the lowest numbered physical port in that bond. Otherwise, in standalone/
 * bridged mode, each port has a logical port ID equal to its physical port ID.
2156 */ 2157 static void ocelot_setup_logical_port_ids(struct ocelot *ocelot) 2158 { 2159 int port; 2160 2161 for (port = 0; port < ocelot->num_phys_ports; port++) { 2162 struct ocelot_port *ocelot_port = ocelot->ports[port]; 2163 struct net_device *bond; 2164 2165 if (!ocelot_port) 2166 continue; 2167 2168 bond = ocelot_port->bond; 2169 if (bond) { 2170 int lag = __ffs(ocelot_get_bond_mask(ocelot, bond, 2171 false)); 2172 2173 ocelot_rmw_gix(ocelot, 2174 ANA_PORT_PORT_CFG_PORTID_VAL(lag), 2175 ANA_PORT_PORT_CFG_PORTID_VAL_M, 2176 ANA_PORT_PORT_CFG, port); 2177 } else { 2178 ocelot_rmw_gix(ocelot, 2179 ANA_PORT_PORT_CFG_PORTID_VAL(port), 2180 ANA_PORT_PORT_CFG_PORTID_VAL_M, 2181 ANA_PORT_PORT_CFG, port); 2182 } 2183 } 2184 } 2185 2186 int ocelot_port_lag_join(struct ocelot *ocelot, int port, 2187 struct net_device *bond, 2188 struct netdev_lag_upper_info *info) 2189 { 2190 if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) 2191 return -EOPNOTSUPP; 2192 2193 ocelot->ports[port]->bond = bond; 2194 2195 ocelot_setup_logical_port_ids(ocelot); 2196 ocelot_apply_bridge_fwd_mask(ocelot); 2197 ocelot_set_aggr_pgids(ocelot); 2198 2199 return 0; 2200 } 2201 EXPORT_SYMBOL(ocelot_port_lag_join); 2202 2203 void ocelot_port_lag_leave(struct ocelot *ocelot, int port, 2204 struct net_device *bond) 2205 { 2206 ocelot->ports[port]->bond = NULL; 2207 2208 ocelot_setup_logical_port_ids(ocelot); 2209 ocelot_apply_bridge_fwd_mask(ocelot); 2210 ocelot_set_aggr_pgids(ocelot); 2211 } 2212 EXPORT_SYMBOL(ocelot_port_lag_leave); 2213 2214 void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active) 2215 { 2216 struct ocelot_port *ocelot_port = ocelot->ports[port]; 2217 2218 ocelot_port->lag_tx_active = lag_tx_active; 2219 2220 /* Rebalance the LAGs */ 2221 ocelot_set_aggr_pgids(ocelot); 2222 } 2223 EXPORT_SYMBOL(ocelot_port_lag_change); 2224 2225 /* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu. 
 * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
 * In the special case that it's the NPI port that we're configuring, the
 * length of the tag and optional prefix needs to be accounted for privately,
 * in order to be able to sustain communication at the requested @sdu.
 */
void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	/* Full frame length on the wire: payload plus L2 header and FCS */
	int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;
	int pause_start, pause_stop;
	int atop, atop_tot;

	if (port == ocelot->npi) {
		/* The NPI port additionally carries the injection/extraction
		 * tag (and optional prefix), so enlarge the limit to match.
		 */
		maxlen += OCELOT_TAG_LEN;

		if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT)
			maxlen += OCELOT_SHORT_PREFIX_LEN;
		else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG)
			maxlen += OCELOT_LONG_PREFIX_LEN;
	}

	ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG);

	/* Set Pause watermark hysteresis: start pausing at 6 max-size
	 * frames' worth of buffer cells, stop at 4.
	 */
	pause_start = 6 * maxlen / OCELOT_BUFFER_CELL_SZ;
	pause_stop = 4 * maxlen / OCELOT_BUFFER_CELL_SZ;
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_START,
			    pause_start);
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_STOP,
			    pause_stop);

	/* Tail dropping watermarks, in buffer cells: the per-port threshold
	 * reserves 9 max-size frames, and the shared threshold is whatever
	 * packet buffer remains. Raw values are encoded for the hardware by
	 * the SoC-specific ops->wm_enc() hook.
	 */
	atop_tot = (ocelot->packet_buffer_size - 9 * maxlen) /
		   OCELOT_BUFFER_CELL_SZ;
	atop = (9 * maxlen) / OCELOT_BUFFER_CELL_SZ;
	ocelot_write_rix(ocelot, ocelot->ops->wm_enc(atop), SYS_ATOP, port);
	ocelot_write(ocelot, ocelot->ops->wm_enc(atop_tot), SYS_ATOP_TOT_CFG);
}
EXPORT_SYMBOL(ocelot_port_set_maxlen);

/* Largest MTU this port can sustain: the 16-bit MAXLEN ceiling (65535) minus
 * the L2 header and FCS, further reduced on the NPI port by the tag and
 * optional prefix overhead (mirrors the additions in
 * ocelot_port_set_maxlen()).
 */
int ocelot_get_max_mtu(struct ocelot *ocelot, int port)
{
	int max_mtu = 65535 - ETH_HLEN - ETH_FCS_LEN;

	if (port == ocelot->npi) {
		max_mtu -= OCELOT_TAG_LEN;

		if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT)
			max_mtu -= OCELOT_SHORT_PREFIX_LEN;
		else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG)
			max_mtu -= OCELOT_LONG_PREFIX_LEN;
	}

	return max_mtu;
}
EXPORT_SYMBOL(ocelot_get_max_mtu);

/* Enable/disable hardware source MAC address learning on @port and cache
 * the setting in ocelot_port->learn_ena.
 */
static void ocelot_port_set_learning(struct ocelot *ocelot, int port,
				     bool enabled)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	u32 val = 0;

	if (enabled)
		val = ANA_PORT_PORT_CFG_LEARN_ENA;

	ocelot_rmw_gix(ocelot, val, ANA_PORT_PORT_CFG_LEARN_ENA,
		       ANA_PORT_PORT_CFG, port);

	ocelot_port->learn_ena = enabled;
}

/* Add/remove @port from the unknown-unicast flooding destination mask */
static void ocelot_port_set_ucast_flood(struct ocelot *ocelot, int port,
					bool enabled)
{
	u32 val = 0;

	if (enabled)
		val = BIT(port);

	ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_UC);
}

/* Add/remove @port from the unknown-multicast flooding destination mask */
static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port,
					bool enabled)
{
	u32 val = 0;

	if (enabled)
		val = BIT(port);

	ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC);
}

/* Add/remove @port from the broadcast flooding destination mask */
static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port,
					bool enabled)
{
	u32 val = 0;

	if (enabled)
		val = BIT(port);

	ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_BC);
}

/* switchdev pre-commit check: reject any bridge port flag that this
 * hardware cannot offload (only learning and the three flood types are
 * supported).
 */
int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port,
				 struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
			   BR_BCAST_FLOOD))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(ocelot_port_pre_bridge_flags);

/* Apply each bridge port flag selected in flags.mask, taking its new value
 * from flags.val.
 */
void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
			      struct switchdev_brport_flags flags)
{
	if (flags.mask & BR_LEARNING)
		ocelot_port_set_learning(ocelot, port,
					 !!(flags.val & BR_LEARNING));

	if (flags.mask & BR_FLOOD)
		ocelot_port_set_ucast_flood(ocelot, port,
					    !!(flags.val & BR_FLOOD));

	if (flags.mask & BR_MCAST_FLOOD)
		ocelot_port_set_mcast_flood(ocelot, port,
					    !!(flags.val & BR_MCAST_FLOOD));

	if (flags.mask & BR_BCAST_FLOOD)
		ocelot_port_set_bcast_flood(ocelot, port,
					    !!(flags.val & BR_BCAST_FLOOD));
}
EXPORT_SYMBOL(ocelot_port_bridge_flags);

/* One-time hardware setup of a front port: MAC timing, frame length limits,
 * pause frames, TPID, and standalone-mode defaults (learning disabled).
 */
void ocelot_init_port(struct ocelot *ocelot, int port)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];

	skb_queue_head_init(&ocelot_port->tx_skbs);

	/* Basic L2 initialization */

	/* Set MAC IFG Gaps
	 * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0
	 * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5
	 */
	ocelot_port_writel(ocelot_port, DEV_MAC_IFG_CFG_TX_IFG(5),
			   DEV_MAC_IFG_CFG);

	/* Load seed (0) and set MAC HDX late collision; the seed load is
	 * latched, hence the delay before clearing the load bit.
	 */
	ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) |
			   DEV_MAC_HDX_CFG_SEED_LOAD,
			   DEV_MAC_HDX_CFG);
	mdelay(1);
	ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67),
			   DEV_MAC_HDX_CFG);

	/* Set Max Length (default Ethernet MTU) and maximum tags allowed */
	ocelot_port_set_maxlen(ocelot, port, ETH_DATA_LEN);
	ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
			   DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
			   DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA |
			   DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
			   DEV_MAC_TAGS_CFG);

	/* Set SMAC of Pause frame (00:00:00:00:00:00) */
	ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_HIGH_CFG);
	ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_LOW_CFG);

	/* Enable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);

	/* Drop frames with multicast source address */
	ocelot_rmw_gix(ocelot, ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
		       ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
		       ANA_PORT_DROP_CFG, port);

	/* Set default VLAN and tag type to 8021Q. */
	ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q),
		       REW_PORT_VLAN_CFG_PORT_TPID_M,
		       REW_PORT_VLAN_CFG, port);

	/* Disable source address learning for standalone mode */
	ocelot_port_set_learning(ocelot, port, false);

	/* Set the port's initial logical port ID value, enable receiving
	 * frames on it, and configure the MAC address learning type to
	 * automatic.
	 */
	ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
			 ANA_PORT_PORT_CFG_RECV_ENA |
			 ANA_PORT_PORT_CFG_PORTID_VAL(port),
			 ANA_PORT_PORT_CFG, port);

	/* Enable vcap lookups */
	ocelot_vcap_enable(ocelot, port);
}
EXPORT_SYMBOL(ocelot_init_port);

/* Configure and enable the CPU port module, which is a set of queues
 * accessible through register MMIO, frame DMA or Ethernet (in case
 * NPI mode is used).
 */
static void ocelot_cpu_port_init(struct ocelot *ocelot)
{
	int cpu = ocelot->num_phys_ports;

	/* The unicast destination PGID for the CPU port module is unused */
	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
	/* Instead set up a multicast destination PGID for traffic copied to
	 * the CPU. Whitelisted MAC addresses like the port netdevice MAC
	 * addresses will be copied to the CPU via this PGID.
	 */
	ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
	ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA |
			 ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
			 ANA_PORT_PORT_CFG, cpu);

	/* Enable CPU port module */
	ocelot_fields_write(ocelot, cpu, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
	/* CPU port Injection/Extraction configuration: no tag prefix on
	 * frames exchanged over register MMIO.
	 */
	ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_XTR_HDR,
			    OCELOT_TAG_PREFIX_NONE);
	ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_INJ_HDR,
			    OCELOT_TAG_PREFIX_NONE);

	/* Configure the CPU port to be VLAN aware */
	ocelot_write_gix(ocelot,
			 ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_VLAN_UNAWARE_PVID) |
			 ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
			 ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
			 ANA_PORT_VLAN_CFG, cpu);
}

/* Read back hardware resource sizes at probe time: the packet buffer size
 * (in bytes) and the number of free frame references, both used later for
 * watermark calculations.
 */
static void ocelot_detect_features(struct ocelot *ocelot)
{
	int mmgt, eq_ctrl;

	/* For Ocelot, Felix, Seville, Serval etc, SYS:MMGT:MMGT:FREECNT holds
	 * the number of 240-byte free memory words (aka 4-cell chunks) and not
	 * 192 bytes as the documentation incorrectly says.
	 */
	mmgt = ocelot_read(ocelot, SYS_MMGT);
	ocelot->packet_buffer_size = 240 * SYS_MMGT_FREECNT(mmgt);

	eq_ctrl = ocelot_read(ocelot, QSYS_EQ_CTRL);
	ocelot->num_frame_refs = QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(eq_ctrl);
}

/* Bring up the switch core: optional reset, statistics allocation, lock and
 * list initialization, MAC/VLAN/VCAP table setup, CPU port setup and
 * chip-wide defaults. Returns 0 on success or a negative errno.
 */
int ocelot_init(struct ocelot *ocelot)
{
	char queue_name[32];
	int i, ret;
	u32 port;

	if (ocelot->ops->reset) {
		ret = ocelot->ops->reset(ocelot);
		if (ret) {
			dev_err(ocelot->dev, "Switch reset failed\n");
			return ret;
		}
	}

	/* One u64 counter per port per statistic; devm-managed, so no
	 * explicit free on the error paths below.
	 */
	ocelot->stats = devm_kcalloc(ocelot->dev,
				     ocelot->num_phys_ports * ocelot->num_stats,
				     sizeof(u64), GFP_KERNEL);
	if (!ocelot->stats)
		return -ENOMEM;

	mutex_init(&ocelot->stats_lock);
	mutex_init(&ocelot->ptp_lock);
	mutex_init(&ocelot->mact_lock);
	spin_lock_init(&ocelot->ptp_clock_lock);
	spin_lock_init(&ocelot->ts_id_lock);
	snprintf(queue_name, sizeof(queue_name), "%s-stats",
		 dev_name(ocelot->dev));
	ocelot->stats_queue = create_singlethread_workqueue(queue_name);
	if (!ocelot->stats_queue)
		return -ENOMEM;

	ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0);
	if (!ocelot->owq) {
		destroy_workqueue(ocelot->stats_queue);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&ocelot->multicast);
	INIT_LIST_HEAD(&ocelot->pgids);
	INIT_LIST_HEAD(&ocelot->vlans);
	ocelot_detect_features(ocelot);
	ocelot_mact_init(ocelot);
	ocelot_vlan_init(ocelot);
	ocelot_vcap_init(ocelot);
	ocelot_cpu_port_init(ocelot);

	for (port = 0; port < ocelot->num_phys_ports; port++) {
		/* Clear all counters (5 groups) */
		ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port) |
			     SYS_STAT_CFG_STAT_CLEAR_SHOT(0x7f),
			     SYS_STAT_CFG);
	}

	/* Only use S-Tag */
	ocelot_write(ocelot, ETH_P_8021AD, SYS_VLAN_ETYPE_CFG);

	/* Aggregation mode */
	ocelot_write(ocelot, ANA_AGGR_CFG_AC_SMAC_ENA |
		     ANA_AGGR_CFG_AC_DMAC_ENA |
		     ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA |
		     ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA |
		     ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA |
		     ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA,
		     ANA_AGGR_CFG);

	/* Set MAC age time to default value. The entry is aged after
	 * 2*AGE_PERIOD
	 */
	ocelot_write(ocelot,
		     ANA_AUTOAGE_AGE_PERIOD(BR_DEFAULT_AGEING_TIME / 2 / HZ),
		     ANA_AUTOAGE);

	/* Disable learning for frames discarded by VLAN ingress filtering */
	regmap_field_write(ocelot->regfields[ANA_ADVLEARN_VLAN_CHK], 1);

	/* Setup frame ageing - fixed value "2 sec" - in 6.5 us units */
	ocelot_write(ocelot, SYS_FRM_AGING_AGE_TX_ENA |
		     SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING);

	/* Setup flooding PGIDs: unknown unicast, unknown multicast and
	 * broadcast each resolve to their dedicated PGID.
	 */
	for (i = 0; i < ocelot->num_flooding_pgids; i++)
		ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
				 ANA_FLOODING_FLD_BROADCAST(PGID_BC) |
				 ANA_FLOODING_FLD_UNICAST(PGID_UC),
				 ANA_FLOODING, i);
	ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
		     ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) |
		     ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) |
		     ANA_FLOODING_IPMC_FLD_MC4_CTRL(PGID_MC),
		     ANA_FLOODING_IPMC);

	for (port = 0; port < ocelot->num_phys_ports; port++) {
		/* Transmit the frame to the local port. */
		ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
		/* Do not forward BPDU frames to the front ports. */
		ocelot_write_gix(ocelot,
				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
				 ANA_PORT_CPU_FWD_BPDU_CFG,
				 port);
		/* Ensure bridging is disabled */
		ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port);
	}

	/* Non-reserved multicast destination PGIDs initially reach all
	 * physical ports.
	 */
	for_each_nonreserved_multicast_dest_pgid(ocelot, i) {
		u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0));

		ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
	}

	/* The blackhole PGID forwards to no port at all */
	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_BLACKHOLE);

	/* Allow broadcast and unknown L2 multicast to the CPU. */
	ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
		       ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
		       ANA_PGID_PGID, PGID_MC);
	ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
		       ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
		       ANA_PGID_PGID, PGID_BC);
	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4);
	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6);

	/* Allow manual injection via DEVCPU_QS registers, and byte swap these
	 * registers endianness.
	 */
	ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_BYTE_SWAP |
			 QS_INJ_GRP_CFG_MODE(1), QS_INJ_GRP_CFG, 0);
	ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_BYTE_SWAP |
			 QS_XTR_GRP_CFG_MODE(1), QS_XTR_GRP_CFG, 0);
	/* Select the CPU extraction queue (0-7) for each class of frame
	 * copied/redirected to the CPU.
	 */
	ocelot_write(ocelot, ANA_CPUQ_CFG_CPUQ_MIRROR(2) |
		     ANA_CPUQ_CFG_CPUQ_LRN(2) |
		     ANA_CPUQ_CFG_CPUQ_MAC_COPY(2) |
		     ANA_CPUQ_CFG_CPUQ_SRC_COPY(2) |
		     ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(2) |
		     ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(6) |
		     ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(6) |
		     ANA_CPUQ_CFG_CPUQ_IGMP(6) |
		     ANA_CPUQ_CFG_CPUQ_MLD(6), ANA_CPUQ_CFG);
	/* CPU queue selection for BPDU/GARP traffic, one register per index
	 * (16 entries).
	 */
	for (i = 0; i < 16; i++)
		ocelot_write_rix(ocelot, ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(6) |
				 ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
				 ANA_CPUQ_8021_CFG, i);

	/* Kick off the periodic statistics readout work */
	INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
	queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
			   OCELOT_STATS_CHECK_DELAY);

	return 0;
}
EXPORT_SYMBOL(ocelot_init);

/* Tear down what ocelot_init() set up: stop the pending statistics work and
 * destroy both workqueues and the stats lock. (ocelot->stats is devm-managed
 * and freed automatically.)
 */
void ocelot_deinit(struct ocelot *ocelot)
{
	cancel_delayed_work(&ocelot->stats_work);
	destroy_workqueue(ocelot->stats_queue);
	destroy_workqueue(ocelot->owq);
	mutex_destroy(&ocelot->stats_lock);
}
EXPORT_SYMBOL(ocelot_deinit);

/* Per-port teardown: free any skbs still held on the port's tx_skbs queue
 * (populated since ocelot_init_port()).
 */
void ocelot_deinit_port(struct ocelot *ocelot, int port)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];

	skb_queue_purge(&ocelot_port->tx_skbs);
}
EXPORT_SYMBOL(ocelot_deinit_port);

MODULE_LICENSE("Dual MIT/GPL");