// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments K3 AM65 Ethernet Switchdev Driver
 *
 * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/
 *
 */

#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <net/switchdev.h>

#include "am65-cpsw-nuss.h"
#include "am65-cpsw-switchdev.h"
#include "cpsw_ale.h"

struct am65_cpsw_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct am65_cpsw_port *port;
	unsigned long event;
};

static int am65_cpsw_port_stp_state_set(struct am65_cpsw_port *port, u8 state)
{
	struct am65_cpsw_common *cpsw = port->common;
	u8 cpsw_state;
	int ret = 0;

	switch (state) {
	case BR_STATE_FORWARDING:
		cpsw_state = ALE_PORT_STATE_FORWARD;
		break;
	case BR_STATE_LEARNING:
		cpsw_state = ALE_PORT_STATE_LEARN;
		break;
	case BR_STATE_DISABLED:
		cpsw_state = ALE_PORT_STATE_DISABLE;
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		cpsw_state = ALE_PORT_STATE_BLOCK;
		break;
	default:
		return -EOPNOTSUPP;
	}

	ret = cpsw_ale_control_set(cpsw->ale, port->port_id,
				   ALE_PORT_STATE, cpsw_state);
	netdev_dbg(port->ndev, "ale state: %u\n", cpsw_state);

	return ret;
}

static int am65_cpsw_port_attr_br_flags_set(struct am65_cpsw_port *port,
					    struct net_device *orig_dev,
					    unsigned long brport_flags)
{
	struct am65_cpsw_common *cpsw = port->common;
	bool unreg_mcast_add = false;

	if (brport_flags & BR_MCAST_FLOOD)
		unreg_mcast_add = true;
	netdev_dbg(port->ndev, "BR_MCAST_FLOOD: %d port %u\n",
		   unreg_mcast_add, port->port_id);

	cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(port->port_id),
				 unreg_mcast_add);

	return 0;
}

static int am65_cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
						unsigned long flags)
{
	if (flags & ~(BR_LEARNING | BR_MCAST_FLOOD))
		return -EINVAL;

	return 0;
}

static int am65_cpsw_port_attr_set(struct net_device *ndev,
				   const struct switchdev_attr *attr)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret;

	netdev_dbg(ndev, "attr: id %u port: %u\n", attr->id, port->port_id);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		ret = am65_cpsw_port_attr_br_flags_pre_set(ndev,
							   attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		ret = am65_cpsw_port_stp_state_set(port, attr->u.stp_state);
		netdev_dbg(ndev, "stp state: %u\n", attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		ret = am65_cpsw_port_attr_br_flags_set(port, attr->orig_dev,
						       attr->u.brport_flags);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static u16 am65_cpsw_get_pvid(struct am65_cpsw_port *port)
{
	struct am65_cpsw_common *cpsw = port->common;
	struct am65_cpsw_host *host_p = am65_common_get_host(cpsw);
	u32 pvid;

	if (port->port_id)
		pvid = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
	else
		pvid = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);

	pvid = pvid & 0xfff;

	return pvid;
}
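/* Program the port VLAN register: VID in bits 11:0, CFI in bit 12 and
 * PCP/COS in bits 15:13. A port_id of 0 denotes the host (CPU) port, whose
 * register lives in the host port register block.
 */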
static void am65_cpsw_set_pvid(struct am65_cpsw_port *port, u16 vid, bool cfi, u32 cos)
{
	struct am65_cpsw_common *cpsw = port->common;
	struct am65_cpsw_host *host_p = am65_common_get_host(cpsw);
	u32 pvid;

	pvid = vid;
	pvid |= cfi ? BIT(12) : 0;
	pvid |= (cos & 0x7) << 13;

	if (port->port_id)
		writel(pvid, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
	else
		writel(pvid, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
}

static int am65_cpsw_port_vlan_add(struct am65_cpsw_port *port, bool untag, bool pvid,
				   u16 vid, struct net_device *orig_dev)
{
	bool cpu_port = netif_is_bridge_master(orig_dev);
	struct am65_cpsw_common *cpsw = port->common;
	int unreg_mcast_mask = 0;
	int reg_mcast_mask = 0;
	int untag_mask = 0;
	int port_mask;
	int ret = 0;
	u32 flags;

	if (cpu_port) {
		port_mask = BIT(HOST_PORT_NUM);
		flags = orig_dev->flags;
		unreg_mcast_mask = port_mask;
	} else {
		port_mask = BIT(port->port_id);
		flags = port->ndev->flags;
	}

	if (flags & IFF_MULTICAST)
		reg_mcast_mask = port_mask;

	if (untag)
		untag_mask = port_mask;

	ret = cpsw_ale_vlan_add_modify(cpsw->ale, vid, port_mask, untag_mask,
				       reg_mcast_mask, unreg_mcast_mask);
	if (ret) {
		netdev_err(port->ndev, "Unable to add vlan\n");
		return ret;
	}

	if (cpu_port)
		cpsw_ale_add_ucast(cpsw->ale, port->slave.mac_addr,
				   HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, vid);
	if (!pvid)
		return ret;

	am65_cpsw_set_pvid(port, vid, 0, 0);

	netdev_dbg(port->ndev, "VID add: %s: vid:%u ports:%X\n",
		   port->ndev->name, vid, port_mask);

	return ret;
}

static int am65_cpsw_port_vlan_del(struct am65_cpsw_port *port, u16 vid,
				   struct net_device *orig_dev)
{
	bool cpu_port = netif_is_bridge_master(orig_dev);
	struct am65_cpsw_common *cpsw = port->common;
	int port_mask;
	int ret = 0;

	if (cpu_port)
		port_mask = BIT(HOST_PORT_NUM);
	else
		port_mask = BIT(port->port_id);

	ret = cpsw_ale_del_vlan(cpsw->ale, vid, port_mask);
	if (ret != 0)
		return ret;

	/* We don't care for the return value here, error is returned only if
	 * the unicast entry is not present
	 */
	if (cpu_port)
		cpsw_ale_del_ucast(cpsw->ale, port->slave.mac_addr,
				   HOST_PORT_NUM, ALE_VLAN, vid);

	if (vid == am65_cpsw_get_pvid(port))
		am65_cpsw_set_pvid(port, 0, 0, 0);

	/* We don't care for the return value here, error is returned only if
	 * the multicast entry is not present
	 */
	cpsw_ale_del_mcast(cpsw->ale, port->ndev->broadcast, port_mask,
			   ALE_VLAN, vid);
	netdev_dbg(port->ndev, "VID del: %s: vid:%u ports:%X\n",
		   port->ndev->name, vid, port_mask);

	return ret;
}

static int am65_cpsw_port_vlans_add(struct am65_cpsw_port *port,
				    const struct switchdev_obj_port_vlan *vlan)
{
	bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	bool cpu_port = netif_is_bridge_master(orig_dev);
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	netdev_dbg(port->ndev, "VID add: %s: vid:%u flags:%X\n",
		   port->ndev->name, vlan->vid, vlan->flags);

	if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
		return 0;

	return am65_cpsw_port_vlan_add(port, untag, pvid, vlan->vid, orig_dev);
}

static int am65_cpsw_port_vlans_del(struct am65_cpsw_port *port,
				    const struct switchdev_obj_port_vlan *vlan)
{
	return am65_cpsw_port_vlan_del(port, vlan->vid, vlan->obj.orig_dev);
}
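/* Install a multicast (MDB) entry in the ALE. Entries requested on behalf
 * of the bridge itself (host MDB) are added on the host port only.
 */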
static int am65_cpsw_port_mdb_add(struct am65_cpsw_port *port,
				  struct switchdev_obj_port_mdb *mdb)
{
	struct net_device *orig_dev = mdb->obj.orig_dev;
	bool cpu_port = netif_is_bridge_master(orig_dev);
	struct am65_cpsw_common *cpsw = port->common;
	int port_mask;
	int err;

	if (cpu_port)
		port_mask = BIT(HOST_PORT_NUM);
	else
		port_mask = BIT(port->port_id);

	err = cpsw_ale_add_mcast(cpsw->ale, mdb->addr, port_mask,
				 ALE_VLAN, mdb->vid, 0);
	netdev_dbg(port->ndev, "MDB add: %s: vid %u:%pM ports: %X\n",
		   port->ndev->name, mdb->vid, mdb->addr, port_mask);

	return err;
}

static int am65_cpsw_port_mdb_del(struct am65_cpsw_port *port,
				  struct switchdev_obj_port_mdb *mdb)
{
	struct net_device *orig_dev = mdb->obj.orig_dev;
	bool cpu_port = netif_is_bridge_master(orig_dev);
	struct am65_cpsw_common *cpsw = port->common;
	int del_mask;

	if (cpu_port)
		del_mask = BIT(HOST_PORT_NUM);
	else
		del_mask = BIT(port->port_id);

	/* Ignore error as error code is returned only when entry is already removed */
	cpsw_ale_del_mcast(cpsw->ale, mdb->addr, del_mask,
			   ALE_VLAN, mdb->vid);
	netdev_dbg(port->ndev, "MDB del: %s: vid %u:%pM ports: %X\n",
		   port->ndev->name, mdb->vid, mdb->addr, del_mask);

	return 0;
}

static int am65_cpsw_port_obj_add(struct net_device *ndev,
				  const struct switchdev_obj *obj,
				  struct netlink_ext_ack *extack)
{
	struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int err = 0;

	netdev_dbg(ndev, "obj_add: id %u port: %u\n", obj->id, port->port_id);

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = am65_cpsw_port_vlans_add(port, vlan);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = am65_cpsw_port_mdb_add(port, mdb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int am65_cpsw_port_obj_del(struct net_device *ndev,
				  const struct switchdev_obj *obj)
{
	struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int err = 0;

	netdev_dbg(ndev, "obj_del: id %u port: %u\n", obj->id, port->port_id);

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = am65_cpsw_port_vlans_del(port, vlan);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = am65_cpsw_port_mdb_del(port, mdb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static void am65_cpsw_fdb_offload_notify(struct net_device *ndev,
					 struct switchdev_notifier_fdb_info *rcv)
{
	struct switchdev_notifier_fdb_info info;

	info.addr = rcv->addr;
	info.vid = rcv->vid;
	info.offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
				 ndev, &info.info, NULL);
}
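/* Deferred work: program or remove a bridge FDB entry in the ALE. Entries
 * matching the port's own MAC address are installed on the host (CPU) port
 * instead of the external port.
 */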
static void am65_cpsw_switchdev_event_work(struct work_struct *work)
{
	struct am65_cpsw_switchdev_event_work *switchdev_work =
		container_of(work, struct am65_cpsw_switchdev_event_work, work);
	struct am65_cpsw_port *port = switchdev_work->port;
	struct switchdev_notifier_fdb_info *fdb;
	struct am65_cpsw_common *cpsw = port->common;
	int port_id = port->port_id;

	rtnl_lock();
	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb = &switchdev_work->fdb_info;

		netdev_dbg(port->ndev, "cpsw_fdb_add: MACID = %pM vid = %u flags = %u %u -- port %d\n",
			   fdb->addr, fdb->vid, fdb->added_by_user,
			   fdb->offloaded, port_id);

		if (!fdb->added_by_user)
			break;
		if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
			port_id = HOST_PORT_NUM;

		cpsw_ale_add_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
				   fdb->vid ? ALE_VLAN : 0, fdb->vid);
		am65_cpsw_fdb_offload_notify(port->ndev, fdb);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb = &switchdev_work->fdb_info;

		netdev_dbg(port->ndev, "cpsw_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n",
			   fdb->addr, fdb->vid, fdb->added_by_user,
			   fdb->offloaded, port_id);

		if (!fdb->added_by_user)
			break;
		if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
			port_id = HOST_PORT_NUM;

		cpsw_ale_del_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
				   fdb->vid ? ALE_VLAN : 0, fdb->vid);
		break;
	default:
		break;
	}
	rtnl_unlock();

	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(port->ndev);
}

/* called under rcu_read_lock() */
static int am65_cpsw_switchdev_event(struct notifier_block *unused,
				     unsigned long event, void *ptr)
{
	struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
	struct am65_cpsw_switchdev_event_work *switchdev_work;
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct switchdev_notifier_fdb_info *fdb_info = ptr;
	int err;

	if (event == SWITCHDEV_PORT_ATTR_SET) {
		err = switchdev_handle_port_attr_set(ndev, ptr,
						     am65_cpsw_port_dev_check,
						     am65_cpsw_port_attr_set);
		return notifier_from_errno(err);
	}

	if (!am65_cpsw_port_dev_check(ndev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (WARN_ON(!switchdev_work))
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work, am65_cpsw_switchdev_event_work);
	switchdev_work->port = port;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		dev_hold(ndev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	queue_work(system_long_wq, &switchdev_work->work);

	return NOTIFY_DONE;

err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

static struct notifier_block cpsw_switchdev_notifier = {
	.notifier_call = am65_cpsw_switchdev_event,
};
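/* Blocking (process context) switchdev notifier: dispatches port object
 * add/del and port attribute set requests to the handlers above.
 */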
static int am65_cpsw_switchdev_blocking_event(struct notifier_block *unused,
					      unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    am65_cpsw_port_dev_check,
						    am65_cpsw_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    am65_cpsw_port_dev_check,
						    am65_cpsw_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     am65_cpsw_port_dev_check,
						     am65_cpsw_port_attr_set);
		return notifier_from_errno(err);
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block cpsw_switchdev_bl_notifier = {
	.notifier_call = am65_cpsw_switchdev_blocking_event,
};

int am65_cpsw_switchdev_register_notifiers(struct am65_cpsw_common *cpsw)
{
	int ret = 0;

	ret = register_switchdev_notifier(&cpsw_switchdev_notifier);
	if (ret) {
		dev_err(cpsw->dev, "register switchdev notifier fail ret:%d\n",
			ret);
		return ret;
	}

	ret = register_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
	if (ret) {
		dev_err(cpsw->dev, "register switchdev blocking notifier ret:%d\n",
			ret);
		unregister_switchdev_notifier(&cpsw_switchdev_notifier);
	}

	return ret;
}

void am65_cpsw_switchdev_unregister_notifiers(struct am65_cpsw_common *cpsw)
{
	unregister_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
	unregister_switchdev_notifier(&cpsw_switchdev_notifier);
}
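/* Usage sketch (illustrative only, not part of this file): the core
 * am65-cpsw driver is expected to register these notifiers when the switch
 * is brought up in switchdev mode and to unregister them on teardown, e.g.:
 *
 *	ret = am65_cpsw_switchdev_register_notifiers(common);
 *	if (ret)
 *		return ret;
 *	...
 *	am65_cpsw_switchdev_unregister_notifiers(common);
 *
 * The variable name "common" (a struct am65_cpsw_common *) is assumed here
 * for illustration.
 */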