// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa.c - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 */

#include <linux/device.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <linux/ptp_classify.h>
#include <net/dst_metadata.h>

#include "dsa_priv.h"

static LIST_HEAD(dsa_tag_drivers_list);
static DEFINE_MUTEX(dsa_tag_drivers_lock);

static void dsa_tag_driver_register(struct dsa_tag_driver *dsa_tag_driver,
				    struct module *owner)
{
	dsa_tag_driver->owner = owner;

	mutex_lock(&dsa_tag_drivers_lock);
	list_add_tail(&dsa_tag_driver->list, &dsa_tag_drivers_list);
	mutex_unlock(&dsa_tag_drivers_lock);
}

void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
			      unsigned int count, struct module *owner)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_register(dsa_tag_driver_array[i], owner);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_register);

static void dsa_tag_driver_unregister(struct dsa_tag_driver *dsa_tag_driver)
{
	mutex_lock(&dsa_tag_drivers_lock);
	list_del(&dsa_tag_driver->list);
	mutex_unlock(&dsa_tag_drivers_lock);
}

void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
				unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_unregister(dsa_tag_driver_array[i]);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_unregister);
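/* Example: tagging protocol drivers do not usually call the register and
 * unregister helpers above by hand. A tagger module normally uses the
 * module_dsa_tag_driver() helper from <net/dsa.h>, which wraps these calls
 * with THIS_MODULE as the owner. A minimal sketch for a hypothetical "foo"
 * tagger (DSA_TAG_PROTO_FOO, foo_xmit and foo_rcv are made up for
 * illustration):
 *
 *	static const struct dsa_device_ops foo_netdev_ops = {
 *		.name	= "foo",
 *		.proto	= DSA_TAG_PROTO_FOO,
 *		.xmit	= foo_xmit,
 *		.rcv	= foo_rcv,
 *		.needed_headroom = 4,
 *	};
 *
 *	MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_FOO);
 *	module_dsa_tag_driver(foo_netdev_ops);
 */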
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
{
	return ops->name;
}

/* Function takes a reference on the module owning the tagger,
 * so dsa_tag_driver_put must be called afterwards.
 */
const struct dsa_device_ops *dsa_tag_driver_get_by_name(const char *name)
{
	const struct dsa_device_ops *ops = ERR_PTR(-ENOPROTOOPT);
	struct dsa_tag_driver *dsa_tag_driver;

	request_module("%s%s", DSA_TAG_DRIVER_ALIAS, name);

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		const struct dsa_device_ops *tmp = dsa_tag_driver->ops;

		if (strcmp(name, tmp->name))
			continue;

		if (!try_module_get(dsa_tag_driver->owner))
			break;

		ops = tmp;
		break;
	}
	mutex_unlock(&dsa_tag_drivers_lock);

	return ops;
}

const struct dsa_device_ops *dsa_tag_driver_get_by_id(int tag_protocol)
{
	struct dsa_tag_driver *dsa_tag_driver;
	const struct dsa_device_ops *ops;
	bool found = false;

	request_module("%sid-%d", DSA_TAG_DRIVER_ALIAS, tag_protocol);

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		ops = dsa_tag_driver->ops;
		if (ops->proto == tag_protocol) {
			found = true;
			break;
		}
	}

	if (found) {
		if (!try_module_get(dsa_tag_driver->owner))
			ops = ERR_PTR(-ENOPROTOOPT);
	} else {
		ops = ERR_PTR(-ENOPROTOOPT);
	}

	mutex_unlock(&dsa_tag_drivers_lock);

	return ops;
}

void dsa_tag_driver_put(const struct dsa_device_ops *ops)
{
	struct dsa_tag_driver *dsa_tag_driver;

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		if (dsa_tag_driver->ops == ops) {
			module_put(dsa_tag_driver->owner);
			break;
		}
	}
	mutex_unlock(&dsa_tag_drivers_lock);
}
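/* Example: a caller that resolves a tagger by name holds a reference on the
 * owning module on success and must drop it when done, as the comment above
 * dsa_tag_driver_get_by_name() requires. A minimal sketch; "foo" and
 * foo_resolve_tagger() are hypothetical:
 *
 *	static int foo_resolve_tagger(struct dsa_port *cpu_dp)
 *	{
 *		const struct dsa_device_ops *ops;
 *
 *		ops = dsa_tag_driver_get_by_name("foo");
 *		if (IS_ERR(ops))
 *			return PTR_ERR(ops);
 *
 *		cpu_dp->tag_ops = ops;
 *		return 0;
 *	}
 *
 * and, once the port no longer uses the tagger:
 *
 *	dsa_tag_driver_put(cpu_dp->tag_ops);
 */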
static int dev_is_class(struct device *dev, void *class)
{
	if (dev->class != NULL && !strcmp(dev->class->name, class))
		return 1;

	return 0;
}

static struct device *dev_find_class(struct device *parent, char *class)
{
	if (dev_is_class(parent, class)) {
		get_device(parent);
		return parent;
	}

	return device_find_child(parent, class, dev_is_class);
}

struct net_device *dsa_dev_to_net_device(struct device *dev)
{
	struct device *d;

	d = dev_find_class(dev, "net");
	if (d != NULL) {
		struct net_device *nd;

		nd = to_net_dev(d);
		dev_hold(nd);
		put_device(d);

		return nd;
	}

	return NULL;
}

/* Determine if we should defer delivery of skb until we have a rx timestamp.
 *
 * Called from dsa_switch_rcv. For now, this will only work if tagging is
 * enabled on the switch. Normally the MAC driver would retrieve the hardware
 * timestamp when it reads the packet out of the hardware. However in a DSA
 * switch, the DSA driver owning the interface to which the packet is
 * delivered is never notified unless we do so here.
 */
static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
				       struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	unsigned int type;

	if (skb_headroom(skb) < ETH_HLEN)
		return false;

	__skb_push(skb, ETH_HLEN);

	type = ptp_classify_raw(skb);

	__skb_pull(skb, ETH_HLEN);

	if (type == PTP_CLASS_NONE)
		return false;

	if (likely(ds->ops->port_rxtstamp))
		return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);

	return false;
}
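/* Example: a switch driver participates in the deferral above through its
 * .port_rxtstamp op, returning true only when it takes custody of the skb
 * and will deliver it later (via netif_rx()) once the hardware timestamp
 * has been attached. A minimal sketch; foo_priv, foo_rx_tstamp_wanted()
 * and foo_defer_skb() are hypothetical:
 *
 *	static bool foo_port_rxtstamp(struct dsa_switch *ds, int port,
 *				      struct sk_buff *skb, unsigned int type)
 *	{
 *		struct foo_priv *priv = ds->priv;
 *
 *		if (!foo_rx_tstamp_wanted(priv, port, type))
 *			return false;
 *
 *		foo_defer_skb(priv, port, skb);
 *		return true;
 *	}
 */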
static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *unused)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct sk_buff *nskb = NULL;
	struct dsa_slave_priv *p;

	if (unlikely(!cpu_dp)) {
		kfree_skb(skb);
		return 0;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return 0;

	if (md_dst && md_dst->type == METADATA_HW_PORT_MUX) {
		unsigned int port = md_dst->u.port_info.port_id;

		skb_dst_drop(skb);
		if (!skb_has_extensions(skb))
			skb->slow_gro = 0;

		skb->dev = dsa_master_find_slave(dev, 0, port);
		if (likely(skb->dev)) {
			dsa_default_offload_fwd_mark(skb);
			nskb = skb;
		}
	} else {
		nskb = cpu_dp->rcv(skb, dev);
	}

	if (!nskb) {
		kfree_skb(skb);
		return 0;
	}

	skb = nskb;
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (unlikely(!dsa_slave_dev_check(skb->dev))) {
		/* Packet is to be injected directly on an upper
		 * device, e.g. a team/bond, so skip all DSA-port
		 * specific actions.
		 */
		netif_rx(skb);
		return 0;
	}

	p = netdev_priv(skb->dev);

	if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
		nskb = dsa_untag_bridge_pvid(skb);
		if (!nskb) {
			kfree_skb(skb);
			return 0;
		}
		skb = nskb;
	}

	dev_sw_netstats_rx_add(skb->dev, skb->len);

	if (dsa_skb_defer_rx_timestamp(p, skb))
		return 0;

	gro_cells_receive(&p->gcells, skb);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static bool dsa_port_is_initialized(const struct dsa_port *dp)
{
	return dp->type == DSA_PORT_TYPE_USER && dp->slave;
}

int dsa_switch_suspend(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	/* Suspend slave network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_slave_suspend(dp->slave);
		if (ret)
			return ret;
	}

	if (ds->ops->suspend)
		ret = ds->ops->suspend(ds);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_switch_suspend);

int dsa_switch_resume(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	if (ds->ops->resume)
		ret = ds->ops->resume(ds);

	if (ret)
		return ret;

	/* Resume slave network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_slave_resume(dp->slave);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif

static struct packet_type dsa_pack_type __read_mostly = {
	.type	= cpu_to_be16(ETH_P_XDSA),
	.func	= dsa_switch_rcv,
};

static struct workqueue_struct *dsa_owq;

bool dsa_schedule_work(struct work_struct *work)
{
	return queue_work(dsa_owq, work);
}

void dsa_flush_workqueue(void)
{
	flush_workqueue(dsa_owq);
}
EXPORT_SYMBOL_GPL(dsa_flush_workqueue);

struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
	if (!netdev || !dsa_slave_dev_check(netdev))
		return ERR_PTR(-ENODEV);

	return dsa_slave_to_port(netdev);
}
EXPORT_SYMBOL_GPL(dsa_port_from_netdev);

bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b)
{
	if (a->type != b->type)
		return false;

	switch (a->type) {
	case DSA_DB_PORT:
		return a->dp == b->dp;
	case DSA_DB_LAG:
		return a->lag.dev == b->lag.dev;
	case DSA_DB_BRIDGE:
		return a->bridge.num == b->bridge.num;
	default:
		WARN_ON(1);
		return false;
	}
}

bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->fdbs, list) {
		if (!ether_addr_equal(a->addr, addr) || a->vid != vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_fdb_present_in_other_db);

bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->mdbs, list) {
		if (!ether_addr_equal(a->addr, mdb->addr) || a->vid != mdb->vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);

static int __init dsa_init_module(void)
{
	int rc;

	dsa_owq = alloc_ordered_workqueue("dsa_ordered",
					  WQ_MEM_RECLAIM);
	if (!dsa_owq)
		return -ENOMEM;

	rc = dsa_slave_register_notifier();
	if (rc)
		goto register_notifier_fail;

	dev_add_pack(&dsa_pack_type);

	rc = rtnl_link_register(&dsa_link_ops);
	if (rc)
		goto netlink_register_fail;

	return 0;

netlink_register_fail:
	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
register_notifier_fail:
	destroy_workqueue(dsa_owq);

	return rc;
}
module_init(dsa_init_module);

static void __exit dsa_cleanup_module(void)
{
	rtnl_link_unregister(&dsa_link_ops);

	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");