// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/selftests.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/if_hsr.h>
#include <linux/netpoll.h>

#include "dsa_priv.h"

static void dsa_slave_standalone_event_work(struct work_struct *work)
{
	struct dsa_standalone_event_work *standalone_work =
		container_of(work, struct dsa_standalone_event_work, work);
	const unsigned char *addr = standalone_work->addr;
	struct net_device *dev = standalone_work->dev;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_mdb mdb;
	struct dsa_switch *ds = dp->ds;
	u16 vid = standalone_work->vid;
	int err;

	switch (standalone_work->event) {
	case DSA_UC_ADD:
		err = dsa_port_standalone_host_fdb_add(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, addr, vid, err);
		}
		break;

	case DSA_UC_DEL:
		err = dsa_port_standalone_host_fdb_del(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	case DSA_MC_ADD:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_add(dp, &mdb);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to mdb: %d\n",
				dp->index, addr, vid, err);
		}
		break;
	case DSA_MC_DEL:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_del(dp, &mdb);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from mdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	}

	kfree(standalone_work);
}

static int dsa_slave_schedule_standalone_work(struct net_device *dev,
					      enum dsa_standalone_event event,
					      const unsigned char *addr,
					      u16 vid)
{
	struct dsa_standalone_event_work *standalone_work;

	standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC);
	if (!standalone_work)
		return -ENOMEM;

	INIT_WORK(&standalone_work->work, dsa_slave_standalone_event_work);
	standalone_work->event = event;
	standalone_work->dev = dev;

	ether_addr_copy(standalone_work->addr, addr);
	standalone_work->vid = vid;

	dsa_schedule_work(&standalone_work->work);

	return 0;
}

static int dsa_slave_sync_uc(struct net_device *dev,
			     const unsigned char *addr)
{
	return dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
}

static int dsa_slave_unsync_uc(struct net_device *dev,
			       const unsigned char *addr)
{
	return dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
}

static int dsa_slave_sync_mc(struct net_device *dev,
			     const unsigned char *addr)
{
	return dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
}

static int dsa_slave_unsync_mc(struct net_device *dev,
			       const unsigned char *addr)
{
	return dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
}
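
/* These sync/unsync callbacks are invoked from __dev_uc_sync() and
 * __dev_mc_sync() in dsa_slave_set_rx_mode() below, with the netdev address
 * lists locked (softirq-safe context). The actual hardware programming may
 * sleep (e.g. MDIO access), so it is deferred to the DSA ordered workqueue;
 * this is also why dsa_slave_schedule_standalone_work() must allocate with
 * GFP_ATOMIC.
 */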

/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}

static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}

void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->index, ds->index);
	ds->slave_mii_bus->parent = ds->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}
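
/* A minimal sketch (not part of this file) of how a switch driver might back
 * this bus: MDIO reads for ports in ds->phys_mii_mask are forwarded to
 * ds->ops->phy_read(). "mychip" and its register accessor are hypothetical.
 */
#if 0
static int mychip_phy_read(struct dsa_switch *ds, int addr, int regnum)
{
	struct mychip *priv = ds->priv;

	/* Forward the MDIO read to the switch's internal PHY block */
	return mychip_read_phy_reg(priv, addr, regnum);
}
#endif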

/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
	return dsa_slave_to_master(dev)->ifindex;
}

static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int err;

	err = dev_open(master, NULL);
	if (err < 0) {
		netdev_err(dev, "failed to open master %s\n", master->name);
		goto out;
	}

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0);
		if (err)
			goto out;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto del_host_addr;
	}

	err = dsa_port_enable_rt(dp, dev->phydev);
	if (err)
		goto del_unicast;

	return 0;

del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
del_host_addr:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
out:
	return err;
}

static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	dsa_port_disable_rt(dp);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

	return 0;
}

/* Keep flooding enabled towards this port's CPU port as long as it serves at
 * least one port in the tree that requires it.
 */
static void dsa_port_manage_cpu_flood(struct dsa_port *dp)
{
	struct switchdev_brport_flags flags = {
		.mask = BR_FLOOD | BR_MCAST_FLOOD,
	};
	struct dsa_switch_tree *dst = dp->ds->dst;
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_port *other_dp;
	int err;

	list_for_each_entry(other_dp, &dst->ports, list) {
		if (!dsa_port_is_user(other_dp))
			continue;

		if (other_dp->cpu_dp != cpu_dp)
			continue;

		if (other_dp->slave->flags & IFF_ALLMULTI)
			flags.val |= BR_MCAST_FLOOD;
		if (other_dp->slave->flags & IFF_PROMISC)
			flags.val |= BR_FLOOD;
	}

	err = dsa_port_pre_bridge_flags(dp, flags, NULL);
	if (err)
		return;

	dsa_port_bridge_flags(cpu_dp, flags, NULL);
}
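
/* Worked example: if user ports swp0 and swp1 (names hypothetical) share one
 * CPU port and tcpdump puts swp0 in promiscuous mode, the loop above sets
 * BR_FLOOD in flags.val, so unknown-unicast flooding towards the CPU port
 * stays on. Once the last promiscuous/allmulti user port clears its flag,
 * the corresponding bit drops out of flags.val and flooding is turned off
 * again.
 */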

static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(master,
				 dev->flags & IFF_ALLMULTI ? 1 : -1);
	if (change & IFF_PROMISC)
		dev_set_promiscuity(master,
				    dev->flags & IFF_PROMISC ? 1 : -1);

	if (dsa_switch_supports_uc_filtering(ds) &&
	    dsa_switch_supports_mc_filtering(ds))
		dsa_port_manage_cpu_flood(dp);
}

static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	dev_mc_sync(master, dev);
	dev_uc_sync(master, dev);
	if (dsa_switch_supports_mc_filtering(ds))
		__dev_mc_sync(dev, dsa_slave_sync_mc, dsa_slave_unsync_mc);
	if (dsa_switch_supports_uc_filtering(ds))
		__dev_uc_sync(dev, dsa_slave_sync_uc, dsa_slave_unsync_uc);
}

static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* If the port is down, the address isn't synced yet to hardware or
	 * to the DSA master, so there is nothing to change.
	 */
	if (!(dev->flags & IFF_UP))
		goto out_change_dev_addr;

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
		if (err)
			return err;
	}

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			goto del_unicast;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

out_change_dev_addr:
	eth_hw_addr_set(dev, addr->sa_data);

	return 0;

del_unicast:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0);

	return err;
}

struct dsa_slave_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}
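
/* This is what serves, for instance, "bridge fdb show dev swp0" (interface
 * name hypothetical): dsa_port_fdb_dump() asks the driver for the hardware
 * FDB, and each entry is converted into an RTM_NEWNEIGH netlink message by
 * dsa_slave_port_fdb_do_dump() above.
 */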

static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}

static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
				   const struct switchdev_attr *attr,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	if (ctx && ctx != dp)
		return 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_state(dp, attr->u.stp_state, true);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
						extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/* Must be called under rcu_read_lock() */
static int
dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
		u16 vid;

		if (!is_vlan_dev(upper_dev))
			continue;

		vid = vlan_dev_vlan_id(upper_dev);
		if (vid == vlan->vid)
			return -EBUSY;
	}

	return 0;
}

static int dsa_slave_vlan_add(struct net_device *dev,
			      const struct switchdev_obj *obj,
			      struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;
	int err;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
	 * the same VID.
	 */
	if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
		rcu_read_lock();
		err = dsa_slave_vlan_check_for_8021q_uppers(dev, vlan);
		rcu_read_unlock();
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Port already has a VLAN upper with this VID");
			return err;
		}
	}

	return dsa_port_vlan_add(dp, vlan, extack);
}

/* Offload a VLAN installed on the bridge or on a foreign interface by
 * installing it as a VLAN towards the CPU port.
 */
static int dsa_slave_host_vlan_add(struct net_device *dev,
				   const struct switchdev_obj *obj,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Even though drivers often handle CPU membership in special ways,
	 * it doesn't make sense to program a PVID, so clear this flag.
	 */
	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;

	return dsa_port_host_vlan_add(dp, &vlan, extack);
}

static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_add(dev, obj, extack);
		else
			err = dsa_slave_host_vlan_add(dev, obj, extack);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_vlan_del(struct net_device *dev,
			      const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_vlan_del(dp, vlan);
}

static int dsa_slave_host_vlan_del(struct net_device *dev,
				   const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_host_vlan_del(dp, vlan);
}

static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_del(dev, obj);
		else
			err = dsa_slave_host_vlan_del(dev, obj);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	return netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
	return NETDEV_TX_OK;
#endif
}

static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return;

	if (!ds->ops->port_txtstamp)
		return;

	ds->ops->port_txtstamp(ds, p->dp->index, skb);
}

netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
	/* SKBs for netpoll still need to be mangled with the
	 * protocol-specific tag to be successfully transmitted.
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, skb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	skb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
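
/* Hedged sketch (not part of this file) of a tail tagger's xmit hook,
 * modeled loosely on net/dsa/tag_trailer.c; dsa_slave_xmit() below hands its
 * return value to dsa_enqueue_skb() above. "mytag" and the trailer layout
 * are hypothetical.
 */
#if 0
static struct sk_buff *mytag_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	u8 *trailer;

	/* dsa_realloc_skb() has already guaranteed 4 bytes of tailroom */
	trailer = skb_put(skb, 4);
	trailer[0] = 0x80;
	trailer[1] = 1 << dp->index;
	trailer[2] = 0x10;
	trailer[3] = 0x00;

	return skb;
}
#endif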

static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
{
	int needed_headroom = dev->needed_headroom;
	int needed_tailroom = dev->needed_tailroom;

	/* For tail taggers, we need to pad short frames ourselves, to ensure
	 * that the tail tag does not fail at its role of being at the end of
	 * the packet, once the master interface pads the frame. Account for
	 * that pad length here, and pad later.
	 */
	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
		needed_tailroom += ETH_ZLEN - skb->len;
	/* skb_headroom() returns unsigned int... */
	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);

	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
		/* No reallocation needed, yay! */
		return 0;

	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
				GFP_ATOMIC);
}
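
/* Worked example of the tailroom math above: with a hypothetical 4-byte tail
 * tag (dev->needed_tailroom == 4) and a 50-byte frame, the frame will first
 * be padded to ETH_ZLEN (60), so the requirement becomes
 * 4 + (60 - 50) = 14 bytes of tailroom before the tag can be appended safely.
 */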

static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct sk_buff *nskb;

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	memset(skb->cb, 0, sizeof(skb->cb));

	/* Handle tx timestamp if any */
	dsa_skb_tx_timestamp(p, skb);

	if (dsa_realloc_skb(skb, dev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* needed_tailroom should still be 'warm' in the cache line from
	 * dsa_realloc_skb(), which has also ensured that padding is safe.
	 */
	if (dev->needed_tailroom)
		eth_skb_pad(skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	return dsa_enqueue_skb(nskb, dev);
}

/* ethtool operations *******************************************************/

static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}

static int dsa_slave_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}

static int dsa_slave_nway_reset(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_nway_reset(dp->pl);
}

static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}

static int dsa_slave_get_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	} else if (stringset == ETH_SS_TEST) {
		net_selftest_get_strings(data);
	}
}

static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(dev->tstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}
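
/* So the "ethtool -S" layout for a DSA slave is: slots 0-3 are the software
 * tx_packets/tx_bytes/rx_packets/rx_bytes counters summed across CPUs above,
 * and any driver-specific hardware counters follow from slot 4 onwards,
 * matching the string layout in dsa_slave_get_strings() and the count from
 * dsa_slave_get_sset_count() below.
 */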

static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count = 0;

		if (ds->ops->get_sset_count) {
			count = ds->ops->get_sset_count(ds, dp->index, sset);
			if (count < 0)
				return count;
		}

		return count + 4;
	} else if (sset == ETH_SS_TEST) {
		return net_selftest_get_count();
	}

	return -EOPNOTSUPP;
}

static void dsa_slave_get_eth_phy_stats(struct net_device *dev,
					struct ethtool_eth_phy_stats *phy_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_phy_stats)
		ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
}

static void dsa_slave_get_eth_mac_stats(struct net_device *dev,
					struct ethtool_eth_mac_stats *mac_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_mac_stats)
		ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats);
}

static void
dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
			     struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_ctrl_stats)
		ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
}

static void dsa_slave_net_selftest(struct net_device *ndev,
				   struct ethtool_test *etest, u64 *buf)
{
	struct dsa_port *dp = dsa_slave_to_port(ndev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->self_test) {
		ds->ops->self_test(ds, dp->index, etest, buf);
		return;
	}

	net_selftest(ndev, etest, buf);
}

static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	phylink_ethtool_get_wol(dp->pl, w);

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	phylink_ethtool_set_wol(dp->pl, w);

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}

static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}

static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}

static int dsa_slave_get_link_ksettings(struct net_device *dev,
					struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_get(dp->pl, cmd);
}

static int dsa_slave_set_link_ksettings(struct net_device *dev,
					const struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_set(dp->pl, cmd);
}

static void dsa_slave_get_pauseparam(struct net_device *dev,
				     struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	phylink_ethtool_get_pauseparam(dp->pl, pause);
}

static int dsa_slave_set_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_set_pauseparam(dp->pl, pause);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif

static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_mirror_tc_entry *mirror;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	struct dsa_port *to_dp;
	int err;

	if (!ds->ops->port_mirror_add)
		return -EOPNOTSUPP;

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	act = &cls->rule->action.entries[0];

	if (!act->dev)
		return -EINVAL;

	if (!dsa_slave_dev_check(act->dev))
		return -EOPNOTSUPP;

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
	mirror = &mall_tc_entry->mirror;

	to_dp = dsa_slave_to_port(act->dev);

	mirror->to_local_port = to_dp->index;
	mirror->ingress = ingress;

	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int
dsa_slave_add_cls_matchall_police(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_policer_tc_entry *policer;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	int err;

	if (!ds->ops->port_policer_add) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Policing offload not implemented");
		return -EOPNOTSUPP;
	}

	if (!ingress) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only supported on ingress qdisc");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one port policer allowed");
			return -EEXIST;
		}
	}

	act = &cls->rule->action.entries[0];

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
	policer = &mall_tc_entry->policer;
	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
	policer->burst = act->police.burst;

	err = ds->ops->port_policer_add(ds, dp->index, policer);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int dsa_slave_add_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls,
				      bool ingress)
{
	int err = -EOPNOTSUPP;

	if (cls->common.protocol == htons(ETH_P_ALL) &&
	    flow_offload_has_one_action(&cls->rule->action) &&
	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
		err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
	else if (flow_offload_has_one_action(&cls->rule->action) &&
		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
		err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);

	return err;
}

static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		if (ds->ops->port_mirror_del)
			ds->ops->port_mirror_del(ds, dp->index,
						 &mall_tc_entry->mirror);
		break;
	case DSA_PORT_MALL_POLICER:
		if (ds->ops->port_policer_del)
			ds->ops->port_policer_del(ds, dp->index);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls,
					   bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
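
/* Hedged usage sketch for the matchall paths above (interface names are
 * placeholders, and the switch driver must implement the respective op):
 *
 *   tc qdisc add dev swp0 clsact
 *   # port mirroring, offloaded via ds->ops->port_mirror_add():
 *   tc filter add dev swp0 ingress matchall skip_sw \
 *      action mirred egress mirror dev swp1
 *   # ingress policing, offloaded via ds->ops->port_policer_add():
 *   tc filter add dev swp0 ingress matchall skip_sw \
 *      action police rate 100mbit burst 64k
 */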

static int dsa_slave_add_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_add)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_add(ds, port, cls, ingress);
}

static int dsa_slave_del_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_del)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_del(ds, port, cls, ingress);
}

static int dsa_slave_stats_cls_flower(struct net_device *dev,
				      struct flow_cls_offload *cls,
				      bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_stats)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
}

static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
					 struct flow_cls_offload *cls,
					 bool ingress)
{
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		return dsa_slave_add_cls_flower(dev, cls, ingress);
	case FLOW_CLS_DESTROY:
		return dsa_slave_del_cls_flower(dev, cls, ingress);
	case FLOW_CLS_STATS:
		return dsa_slave_stats_cls_flower(dev, cls, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				       void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
	case TC_SETUP_CLSFLOWER:
		return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static LIST_HEAD(dsa_slave_block_cb_list);

static int dsa_slave_setup_tc_block(struct net_device *dev,
				    struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_slave_setup_tc_block_cb_ig;
	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_slave_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	f->driver_block_list = &dsa_slave_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
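
/* Binder-type mapping above: a clsact "ingress" block binds through
 * dsa_slave_setup_tc_block_cb_ig() and an "egress" block through
 * dsa_slave_setup_tc_block_cb_eg(); both funnel into
 * dsa_slave_setup_tc_block_cb() with only the ingress/egress flag differing,
 * while flow_block_cb_is_busy() rejects a second bind of the same callback
 * for the same device.
 */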

static int dsa_slave_setup_ft_block(struct dsa_switch *ds, int port,
				    void *type_data)
{
	struct dsa_port *cpu_dp = dsa_to_port(ds, port)->cpu_dp;
	struct net_device *master = cpu_dp->master;

	if (!master->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return master->netdev_ops->ndo_setup_tc(master, TC_SETUP_FT, type_data);
}

static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	switch (type) {
	case TC_SETUP_BLOCK:
		return dsa_slave_setup_tc_block(dev, type_data);
	case TC_SETUP_FT:
		return dsa_slave_setup_ft_block(ds, dp->index, type_data);
	default:
		break;
	}

	if (!ds->ops->port_setup_tc)
		return -EOPNOTSUPP;

	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
}

static int dsa_slave_get_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_slave_set_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static int dsa_slave_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *ts)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;

	if (!ds->ops->get_ts_info)
		return -EOPNOTSUPP;

	return ds->ops->get_ts_info(ds, p->dp->index, ts);
}

static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
				     u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	struct netlink_ext_ack extack = {0};
	int ret;

	/* User port... */
	ret = dsa_port_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "%s\n", extack._msg);
		return ret;
	}

	/* And CPU port... */
	ret = dsa_port_host_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
				   extack._msg);
		return ret;
	}

	return 0;
}

static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
				      u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	int err;

	err = dsa_port_vlan_del(dp, &vlan);
	if (err)
		return err;

	return dsa_port_host_vlan_del(dp, &vlan);
}
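
/* These two hooks run when an 8021q upper is created or destroyed while the
 * slave advertises NETIF_F_HW_VLAN_CTAG_FILTER. For example (names
 * hypothetical), "ip link add link swp0 name swp0.100 type vlan id 100"
 * installs VID 100 both on the user port and, via dsa_port_host_vlan_add(),
 * towards the CPU port, so tagged traffic can reach the 8021q upper.
 */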

static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);

	return dsa_slave_vlan_rx_add_vid(arg, proto, vid);
}

static int dsa_slave_clear_vlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);

	return dsa_slave_vlan_rx_kill_vid(arg, proto, vid);
}

/* Keep the VLAN RX filtering list in sync with the hardware only if VLAN
 * filtering is enabled. The baseline is that only ports that offload a
 * VLAN-aware bridge are VLAN-aware, and standalone ports are VLAN-unaware,
 * but there are exceptions for quirky hardware.
 *
 * If ds->vlan_filtering_is_global = true, then standalone ports which share
 * the same switch with other ports that offload a VLAN-aware bridge are also
 * inevitably VLAN-aware.
 *
 * To summarize, a DSA switch port offloads:
 *
 * - If standalone (this includes software bridge, software LAG):
 *     - if ds->needs_standalone_vlan_filtering = true, OR if
 *       (ds->vlan_filtering_is_global = true AND there are bridges spanning
 *       this switch chip which have vlan_filtering=1)
 *         - the 8021q upper VLANs
 *     - else (standalone VLAN filtering is not needed, VLAN filtering is not
 *       global, or it is, but no port is under a VLAN-aware bridge):
 *         - no VLAN (any 8021q upper is a software VLAN)
 *
 * - If under a vlan_filtering=0 bridge which it offloads:
 *     - if ds->configure_vlan_while_not_filtering = true (default):
 *         - the bridge VLANs. These VLANs are committed to hardware but inactive.
 *     - else (deprecated):
 *         - no VLAN. The bridge VLANs are not restored when VLAN awareness is
 *           enabled, so this behavior is broken and discouraged.
 *
 * - If under a vlan_filtering=1 bridge which it offloads:
 *     - the bridge VLANs
 *     - the 8021q upper VLANs
 */
int dsa_slave_manage_vlan_filtering(struct net_device *slave,
				    bool vlan_filtering)
{
	int err;

	if (vlan_filtering) {
		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

		err = vlan_for_each(slave, dsa_slave_restore_vlan, slave);
		if (err) {
			vlan_for_each(slave, dsa_slave_clear_vlan, slave);
			slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
			return err;
		}
	} else {
		err = vlan_for_each(slave, dsa_slave_clear_vlan, slave);
		if (err)
			return err;

		slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	return 0;
}
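
/* In short: when a port becomes VLAN-aware, the 8021q uppers that were
 * software VLANs until now are replayed into hardware via
 * dsa_slave_restore_vlan(), and when it becomes VLAN-unaware they are
 * removed again via dsa_slave_clear_vlan(), with NETIF_F_HW_VLAN_CTAG_FILTER
 * toggled to match.
 */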

struct dsa_hw_port {
	struct list_head list;
	struct net_device *dev;
	int old_mtu;
};

static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
{
	const struct dsa_hw_port *p;
	int err;

	list_for_each_entry(p, hw_port_list, list) {
		if (p->dev->mtu == mtu)
			continue;

		err = dev_set_mtu(p->dev, mtu);
		if (err)
			goto rollback;
	}

	return 0;

rollback:
	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
		if (p->dev->mtu == p->old_mtu)
			continue;

		if (dev_set_mtu(p->dev, p->old_mtu))
			netdev_err(p->dev, "Failed to restore MTU\n");
	}

	return err;
}

static void dsa_hw_port_list_free(struct list_head *hw_port_list)
{
	struct dsa_hw_port *p, *n;

	list_for_each_entry_safe(p, n, hw_port_list, list)
		kfree(p);
}

/* Make the hardware datapath to/from @dev limited to a common MTU */
static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
{
	struct list_head hw_port_list;
	struct dsa_switch_tree *dst;
	int min_mtu = ETH_MAX_MTU;
	struct dsa_port *other_dp;
	int err;

	if (!dp->ds->mtu_enforcement_ingress)
		return;

	if (!dp->bridge)
		return;

	INIT_LIST_HEAD(&hw_port_list);

	/* Populate the list of ports that are part of the same bridge
	 * as the newly added/modified port
	 */
	list_for_each_entry(dst, &dsa_tree_list, list) {
		list_for_each_entry(other_dp, &dst->ports, list) {
			struct dsa_hw_port *hw_port;
			struct net_device *slave;

			if (other_dp->type != DSA_PORT_TYPE_USER)
				continue;

			if (!dsa_port_bridge_same(dp, other_dp))
				continue;

			if (!other_dp->ds->mtu_enforcement_ingress)
				continue;

			slave = other_dp->slave;

			if (min_mtu > slave->mtu)
				min_mtu = slave->mtu;

			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
			if (!hw_port)
				goto out;

			hw_port->dev = slave;
			hw_port->old_mtu = slave->mtu;

			list_add(&hw_port->list, &hw_port_list);
		}
	}

	/* Attempt to configure the entire hardware bridge to the newly added
	 * interface's MTU first, regardless of whether the intention of the
	 * user was to raise or lower it.
	 */
	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
	if (!err)
		goto out;

	/* Clearly that didn't work out so well, so just set the minimum MTU on
	 * all hardware bridge ports now. If this fails too, then all ports will
	 * still have their old MTU rolled back anyway.
	 */
	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);

out:
	dsa_hw_port_list_free(&hw_port_list);
}

int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	struct dsa_port *dp_iter;
	struct dsa_port *cpu_dp;
	int port = p->dp->index;
	int largest_mtu = 0;
	int new_master_mtu;
	int old_master_mtu;
	int mtu_limit;
	int cpu_mtu;
	int err;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	list_for_each_entry(dp_iter, &ds->dst->ports, list) {
		int slave_mtu;

		if (!dsa_port_is_user(dp_iter))
			continue;

		/* During probe, this function will be called for each slave
		 * device, before all of them have been allocated. That's ok:
		 * a missing slave doesn't change what the maximum is, so
		 * ignore it.
		 */
		if (!dp_iter->slave)
			continue;

		/* Pretend that we already applied the setting, which we
		 * actually haven't (still haven't done all integrity checks)
		 */
		if (dp_iter == dp)
			slave_mtu = new_mtu;
		else
			slave_mtu = dp_iter->slave->mtu;

		if (largest_mtu < slave_mtu)
			largest_mtu = slave_mtu;
	}

	cpu_dp = dsa_to_port(ds, port)->cpu_dp;

	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
	old_master_mtu = master->mtu;
	new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
	if (new_master_mtu > mtu_limit)
		return -ERANGE;

	/* If the master MTU isn't over limit, there's no need to check the CPU
	 * MTU, since that surely isn't either.
	 */
	cpu_mtu = largest_mtu;

	/* Start applying stuff */
	if (new_master_mtu != old_master_mtu) {
		err = dev_set_mtu(master, new_master_mtu);
		if (err < 0)
			goto out_master_failed;

		/* We only need to propagate the MTU of the CPU port to
		 * upstream switches, so create a non-targeted notifier which
		 * updates all switches.
		 */
		err = dsa_port_mtu_change(cpu_dp, cpu_mtu, false);
		if (err)
			goto out_cpu_failed;
	}

	err = dsa_port_mtu_change(dp, new_mtu, true);
	if (err)
		goto out_port_failed;

	dev->mtu = new_mtu;

	dsa_bridge_mtu_normalization(dp);

	return 0;

out_port_failed:
	if (new_master_mtu != old_master_mtu)
		dsa_port_mtu_change(cpu_dp, old_master_mtu -
				    dsa_tag_protocol_overhead(cpu_dp->tag_ops),
				    false);
out_cpu_failed:
	if (new_master_mtu != old_master_mtu)
		dev_set_mtu(master, old_master_mtu);
out_master_failed:
	return err;
}
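
/* Worked example of the arithmetic above: with a largest user-port MTU of
 * 1500 and a tagging protocol with 8 bytes of overhead (EDSA, for instance),
 * the master must be set to an MTU of 1508, and that value has to fit within
 * min(master->max_mtu, dev->max_mtu) or -ERANGE is returned.
 */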

static const struct ethtool_ops dsa_slave_ethtool_ops = {
	.get_drvinfo		= dsa_slave_get_drvinfo,
	.get_regs_len		= dsa_slave_get_regs_len,
	.get_regs		= dsa_slave_get_regs,
	.nway_reset		= dsa_slave_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= dsa_slave_get_eeprom_len,
	.get_eeprom		= dsa_slave_get_eeprom,
	.set_eeprom		= dsa_slave_set_eeprom,
	.get_strings		= dsa_slave_get_strings,
	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
	.get_sset_count		= dsa_slave_get_sset_count,
	.get_eth_phy_stats	= dsa_slave_get_eth_phy_stats,
	.get_eth_mac_stats	= dsa_slave_get_eth_mac_stats,
	.get_eth_ctrl_stats	= dsa_slave_get_eth_ctrl_stats,
	.set_wol		= dsa_slave_set_wol,
	.get_wol		= dsa_slave_get_wol,
	.set_eee		= dsa_slave_set_eee,
	.get_eee		= dsa_slave_get_eee,
	.get_link_ksettings	= dsa_slave_get_link_ksettings,
	.set_link_ksettings	= dsa_slave_set_link_ksettings,
	.get_pauseparam		= dsa_slave_get_pauseparam,
	.set_pauseparam		= dsa_slave_set_pauseparam,
	.get_rxnfc		= dsa_slave_get_rxnfc,
	.set_rxnfc		= dsa_slave_set_rxnfc,
	.get_ts_info		= dsa_slave_get_ts_info,
	.self_test		= dsa_slave_net_selftest,
};

static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return &dp->devlink_port;
}

static void dsa_slave_get_stats64(struct net_device *dev,
				  struct rtnl_link_stats64 *s)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_stats64)
		ds->ops->get_stats64(ds, dp->index, s);
	else
		dev_get_tstats64(dev, s);
}

static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx,
				       struct net_device_path *path)
{
	struct dsa_port *dp = dsa_slave_to_port(ctx->dev);
	struct dsa_port *cpu_dp = dp->cpu_dp;

	path->dev = ctx->dev;
	path->type = DEV_PATH_DSA;
	path->dsa.proto = cpu_dp->tag_ops->proto;
	path->dsa.port = dp->index;
	ctx->dev = cpu_dp->master;

	return 0;
}
1917 
1918 static const struct net_device_ops dsa_slave_netdev_ops = {
1919 	.ndo_open		= dsa_slave_open,
1920 	.ndo_stop		= dsa_slave_close,
1921 	.ndo_start_xmit		= dsa_slave_xmit,
1922 	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
1923 	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
1924 	.ndo_set_mac_address	= dsa_slave_set_mac_address,
1925 	.ndo_fdb_dump		= dsa_slave_fdb_dump,
1926 	.ndo_eth_ioctl		= dsa_slave_ioctl,
1927 	.ndo_get_iflink		= dsa_slave_get_iflink,
1928 #ifdef CONFIG_NET_POLL_CONTROLLER
1929 	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
1930 	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
1931 	.ndo_poll_controller	= dsa_slave_poll_controller,
1932 #endif
1933 	.ndo_setup_tc		= dsa_slave_setup_tc,
1934 	.ndo_get_stats64	= dsa_slave_get_stats64,
1935 	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
1936 	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
1937 	.ndo_get_devlink_port	= dsa_slave_get_devlink_port,
1938 	.ndo_change_mtu		= dsa_slave_change_mtu,
1939 	.ndo_fill_forward_path	= dsa_slave_fill_forward_path,
1940 };
1941 
1942 static struct device_type dsa_type = {
1943 	.name	= "dsa",
1944 };
1945 
1946 void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
1947 {
1948 	const struct dsa_port *dp = dsa_to_port(ds, port);
1949 
1950 	if (dp->pl)
1951 		phylink_mac_change(dp->pl, up);
1952 }
1953 EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
1954 
1955 static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
1956 					  struct phylink_link_state *state)
1957 {
1958 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1959 	struct dsa_switch *ds = dp->ds;
1960 
1961 	/* No need to check that this operation is valid, the callback would
1962 	 * not be called if it were not.
1963 	 */
1964 	ds->ops->phylink_fixed_state(ds, dp->index, state);
1965 }
1966 
1967 /* slave device setup *******************************************************/
1968 static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr,
1969 				 u32 flags)
1970 {
1971 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1972 	struct dsa_switch *ds = dp->ds;
1973 
1974 	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
1975 	if (!slave_dev->phydev) {
1976 		netdev_err(slave_dev, "no phy at %d\n", addr);
1977 		return -ENODEV;
1978 	}
1979 
1980 	slave_dev->phydev->dev_flags |= flags;
1981 
1982 	return phylink_connect_phy(dp->pl, slave_dev->phydev);
1983 }
1984 
1985 static int dsa_slave_phy_setup(struct net_device *slave_dev)
1986 {
1987 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1988 	struct device_node *port_dn = dp->dn;
1989 	struct dsa_switch *ds = dp->ds;
1990 	u32 phy_flags = 0;
1991 	int ret;
1992 
1993 	dp->pl_config.dev = &slave_dev->dev;
1994 	dp->pl_config.type = PHYLINK_NETDEV;
1995 
1996 	/* The get_fixed_state callback takes precedence over polling the
1997 	 * link GPIO in PHYLINK (see phylink_get_fixed_state).  Only set
1998 	 * this if the switch provides such a callback.
1999 	 */
2000 	if (ds->ops->phylink_fixed_state) {
2001 		dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
2002 		dp->pl_config.poll_fixed_state = true;
2003 	}
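
	/* For illustration only: a driver using this callback might read
	 * the link state from hardware with a hypothetical sketch like the
	 * following (names and register helpers are made up):
	 *
	 *	static void foo_phylink_fixed_state(struct dsa_switch *ds,
	 *					    int port,
	 *					    struct phylink_link_state *state)
	 *	{
	 *		struct foo_priv *priv = ds->priv;
	 *
	 *		state->link = foo_port_link_up(priv, port);
	 *	}
	 */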
2004 
2005 	ret = dsa_port_phylink_create(dp);
2006 	if (ret)
2007 		return ret;
2008 
2009 	if (ds->ops->get_phy_flags)
2010 		phy_flags = ds->ops->get_phy_flags(ds, dp->index);
2011 
2012 	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
2013 	if (ret == -ENODEV && ds->slave_mii_bus) {
2014 		/* We could not connect to a designated PHY or SFP, so try to
2015 		 * use the switch internal MDIO bus instead
2016 		 */
2017 		ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
2018 	}
2019 	if (ret) {
2020 		netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
2021 			   ERR_PTR(ret));
2022 		phylink_destroy(dp->pl);
2023 	}
2024 
2025 	return ret;
2026 }
2027 
2028 void dsa_slave_setup_tagger(struct net_device *slave)
2029 {
2030 	struct dsa_port *dp = dsa_slave_to_port(slave);
2031 	struct dsa_slave_priv *p = netdev_priv(slave);
2032 	const struct dsa_port *cpu_dp = dp->cpu_dp;
2033 	struct net_device *master = cpu_dp->master;
2034 	const struct dsa_switch *ds = dp->ds;
2035 
2036 	slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
2037 	slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
2038 	/* Try to save one extra realloc later in the TX path (in the master)
2039 	 * by also inheriting the master's needed headroom and tailroom.
2040 	 * The 8021q driver also does this.
2041 	 */
2042 	slave->needed_headroom += master->needed_headroom;
2043 	slave->needed_tailroom += master->needed_tailroom;
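
	/* For illustration only: the tagger overhead inherited above is
	 * declared by the tagging protocol driver in its dsa_device_ops,
	 * e.g. a hypothetical tagger with an 8-byte header:
	 *
	 *	static const struct dsa_device_ops foo_netdev_ops = {
	 *		.name		 = "foo",
	 *		.xmit		 = foo_xmit,
	 *		.rcv		 = foo_rcv,
	 *		.needed_headroom = 8,
	 *	};
	 */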
2044 
2045 	p->xmit = cpu_dp->tag_ops->xmit;
2046 
2047 	slave->features = master->vlan_features | NETIF_F_HW_TC;
2048 	slave->hw_features |= NETIF_F_HW_TC;
2049 	slave->features |= NETIF_F_LLTX;
2050 	if (slave->needed_tailroom)
2051 		slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
2052 	if (ds->needs_standalone_vlan_filtering)
2053 		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2054 }
2055 
2056 int dsa_slave_suspend(struct net_device *slave_dev)
2057 {
2058 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2059 
2060 	if (!netif_running(slave_dev))
2061 		return 0;
2062 
2063 	netif_device_detach(slave_dev);
2064 
2065 	rtnl_lock();
2066 	phylink_stop(dp->pl);
2067 	rtnl_unlock();
2068 
2069 	return 0;
2070 }
2071 
2072 int dsa_slave_resume(struct net_device *slave_dev)
2073 {
2074 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2075 
2076 	if (!netif_running(slave_dev))
2077 		return 0;
2078 
2079 	netif_device_attach(slave_dev);
2080 
2081 	rtnl_lock();
2082 	phylink_start(dp->pl);
2083 	rtnl_unlock();
2084 
2085 	return 0;
2086 }
2087 
2088 int dsa_slave_create(struct dsa_port *port)
2089 {
2090 	const struct dsa_port *cpu_dp = port->cpu_dp;
2091 	struct net_device *master = cpu_dp->master;
2092 	struct dsa_switch *ds = port->ds;
2093 	const char *name = port->name;
2094 	struct net_device *slave_dev;
2095 	struct dsa_slave_priv *p;
2096 	int ret;
2097 
2098 	if (!ds->num_tx_queues)
2099 		ds->num_tx_queues = 1;
2100 
2101 	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
2102 				     NET_NAME_UNKNOWN, ether_setup,
2103 				     ds->num_tx_queues, 1);
2104 	if (!slave_dev)
2105 		return -ENOMEM;
2106 
2107 	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
2108 	if (!is_zero_ether_addr(port->mac))
2109 		eth_hw_addr_set(slave_dev, port->mac);
2110 	else
2111 		eth_hw_addr_inherit(slave_dev, master);
2112 	slave_dev->priv_flags |= IFF_NO_QUEUE;
2113 	if (dsa_switch_supports_uc_filtering(ds))
2114 		slave_dev->priv_flags |= IFF_UNICAST_FLT;
2115 	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
2116 	if (ds->ops->port_max_mtu)
2117 		slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
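	/* For illustration only: port_max_mtu reports the largest MTU the
	 * hardware supports on a port; a hypothetical jumbo-capable driver
	 * might implement it as (FOO_MAX_FRAME_SIZE is a made-up limit):
	 *
	 *	static int foo_port_max_mtu(struct dsa_switch *ds, int port)
	 *	{
	 *		return FOO_MAX_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN;
	 *	}
	 */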
2118 	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
2119 
2120 	SET_NETDEV_DEV(slave_dev, port->ds->dev);
2121 	slave_dev->dev.of_node = port->dn;
2122 	slave_dev->vlan_features = master->vlan_features;
2123 
2124 	p = netdev_priv(slave_dev);
2125 	slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2126 	if (!slave_dev->tstats) {
2127 		free_netdev(slave_dev);
2128 		return -ENOMEM;
2129 	}
2130 
2131 	ret = gro_cells_init(&p->gcells, slave_dev);
2132 	if (ret)
2133 		goto out_free;
2134 
2135 	p->dp = port;
2136 	INIT_LIST_HEAD(&p->mall_tc_list);
2137 	port->slave = slave_dev;
2138 	dsa_slave_setup_tagger(slave_dev);
2139 
2140 	netif_carrier_off(slave_dev);
2141 
2142 	ret = dsa_slave_phy_setup(slave_dev);
2143 	if (ret) {
2144 		netdev_err(slave_dev,
2145 			   "error %d setting up PHY for tree %d, switch %d, port %d\n",
2146 			   ret, ds->dst->index, ds->index, port->index);
2147 		goto out_gcells;
2148 	}
2149 
2150 	rtnl_lock();
2151 
2152 	ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
2153 	if (ret && ret != -EOPNOTSUPP)
2154 		dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
2155 			 ret, ETH_DATA_LEN, port->index);
2156 
2157 	ret = register_netdevice(slave_dev);
2158 	if (ret) {
2159 		netdev_err(master, "error %d registering interface %s\n",
2160 			   ret, slave_dev->name);
2161 		rtnl_unlock();
2162 		goto out_phy;
2163 	}
2164 
2165 	ret = netdev_upper_dev_link(master, slave_dev, NULL);
2166 
2167 	rtnl_unlock();
2168 
2169 	if (ret)
2170 		goto out_unregister;
2171 
2172 	return 0;
2173 
2174 out_unregister:
2175 	unregister_netdev(slave_dev);
2176 out_phy:
2177 	rtnl_lock();
2178 	phylink_disconnect_phy(p->dp->pl);
2179 	rtnl_unlock();
2180 	phylink_destroy(p->dp->pl);
2181 out_gcells:
2182 	gro_cells_destroy(&p->gcells);
2183 out_free:
2184 	free_percpu(slave_dev->tstats);
2185 	free_netdev(slave_dev);
2186 	port->slave = NULL;
2187 	return ret;
2188 }
2189 
2190 void dsa_slave_destroy(struct net_device *slave_dev)
2191 {
2192 	struct net_device *master = dsa_slave_to_master(slave_dev);
2193 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2194 	struct dsa_slave_priv *p = netdev_priv(slave_dev);
2195 
2196 	netif_carrier_off(slave_dev);
2197 	rtnl_lock();
2198 	netdev_upper_dev_unlink(master, slave_dev);
2199 	unregister_netdevice(slave_dev);
2200 	phylink_disconnect_phy(dp->pl);
2201 	rtnl_unlock();
2202 
2203 	phylink_destroy(dp->pl);
2204 	gro_cells_destroy(&p->gcells);
2205 	free_percpu(slave_dev->tstats);
2206 	free_netdev(slave_dev);
2207 }
2208 
2209 bool dsa_slave_dev_check(const struct net_device *dev)
2210 {
2211 	return dev->netdev_ops == &dsa_slave_netdev_ops;
2212 }
2213 EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
2214 
2215 static int dsa_slave_changeupper(struct net_device *dev,
2216 				 struct netdev_notifier_changeupper_info *info)
2217 {
2218 	struct dsa_port *dp = dsa_slave_to_port(dev);
2219 	struct netlink_ext_ack *extack;
2220 	int err = NOTIFY_DONE;
2221 
2222 	extack = netdev_notifier_info_to_extack(&info->info);
2223 
2224 	if (netif_is_bridge_master(info->upper_dev)) {
2225 		if (info->linking) {
2226 			err = dsa_port_bridge_join(dp, info->upper_dev, extack);
2227 			if (!err)
2228 				dsa_bridge_mtu_normalization(dp);
2229 			if (err == -EOPNOTSUPP) {
2230 				NL_SET_ERR_MSG_MOD(extack,
2231 						   "Offloading not supported");
2232 				err = 0;
2233 			}
2234 			err = notifier_from_errno(err);
2235 		} else {
2236 			dsa_port_bridge_leave(dp, info->upper_dev);
2237 			err = NOTIFY_OK;
2238 		}
2239 	} else if (netif_is_lag_master(info->upper_dev)) {
2240 		if (info->linking) {
2241 			err = dsa_port_lag_join(dp, info->upper_dev,
2242 						info->upper_info, extack);
2243 			if (err == -EOPNOTSUPP) {
2244 				NL_SET_ERR_MSG_MOD(extack,
2245 						   "Offloading not supported");
2246 				err = 0;
2247 			}
2248 			err = notifier_from_errno(err);
2249 		} else {
2250 			dsa_port_lag_leave(dp, info->upper_dev);
2251 			err = NOTIFY_OK;
2252 		}
2253 	} else if (is_hsr_master(info->upper_dev)) {
2254 		if (info->linking) {
2255 			err = dsa_port_hsr_join(dp, info->upper_dev);
2256 			if (err == -EOPNOTSUPP) {
2257 				NL_SET_ERR_MSG_MOD(extack,
2258 						   "Offloading not supported");
2259 				err = 0;
2260 			}
2261 			err = notifier_from_errno(err);
2262 		} else {
2263 			dsa_port_hsr_leave(dp, info->upper_dev);
2264 			err = NOTIFY_OK;
2265 		}
2266 	}
2267 
2268 	return err;
2269 }
2270 
2271 static int dsa_slave_prechangeupper(struct net_device *dev,
2272 				    struct netdev_notifier_changeupper_info *info)
2273 {
2274 	struct dsa_port *dp = dsa_slave_to_port(dev);
2275 
2276 	if (netif_is_bridge_master(info->upper_dev) && !info->linking)
2277 		dsa_port_pre_bridge_leave(dp, info->upper_dev);
2278 	else if (netif_is_lag_master(info->upper_dev) && !info->linking)
2279 		dsa_port_pre_lag_leave(dp, info->upper_dev);
2280 	/* dsa_port_pre_hsr_leave is not yet necessary since hsr cannot be
2281 	 * meaningfully enslaved to a bridge yet
2282 	 */
2283 
2284 	return NOTIFY_DONE;
2285 }
2286 
2287 static int
2288 dsa_slave_lag_changeupper(struct net_device *dev,
2289 			  struct netdev_notifier_changeupper_info *info)
2290 {
2291 	struct net_device *lower;
2292 	struct list_head *iter;
2293 	int err = NOTIFY_DONE;
2294 	struct dsa_port *dp;
2295 
2296 	netdev_for_each_lower_dev(dev, lower, iter) {
2297 		if (!dsa_slave_dev_check(lower))
2298 			continue;
2299 
2300 		dp = dsa_slave_to_port(lower);
2301 		if (!dp->lag)
2302 			/* Software LAG */
2303 			continue;
2304 
2305 		err = dsa_slave_changeupper(lower, info);
2306 		if (notifier_to_errno(err))
2307 			break;
2308 	}
2309 
2310 	return err;
2311 }
2312 
2313 /* Same as dsa_slave_lag_changeupper() except that it calls
2314  * dsa_slave_prechangeupper()
2315  */
2316 static int
2317 dsa_slave_lag_prechangeupper(struct net_device *dev,
2318 			     struct netdev_notifier_changeupper_info *info)
2319 {
2320 	struct net_device *lower;
2321 	struct list_head *iter;
2322 	int err = NOTIFY_DONE;
2323 	struct dsa_port *dp;
2324 
2325 	netdev_for_each_lower_dev(dev, lower, iter) {
2326 		if (!dsa_slave_dev_check(lower))
2327 			continue;
2328 
2329 		dp = dsa_slave_to_port(lower);
2330 		if (!dp->lag)
2331 			/* Software LAG */
2332 			continue;
2333 
2334 		err = dsa_slave_prechangeupper(lower, info);
2335 		if (notifier_to_errno(err))
2336 			break;
2337 	}
2338 
2339 	return err;
2340 }
2341 
2342 static int
2343 dsa_prevent_bridging_8021q_upper(struct net_device *dev,
2344 				 struct netdev_notifier_changeupper_info *info)
2345 {
2346 	struct netlink_ext_ack *ext_ack;
2347 	struct net_device *slave, *br;
2348 	struct dsa_port *dp;
2349 
2350 	ext_ack = netdev_notifier_info_to_extack(&info->info);
2351 
2352 	if (!is_vlan_dev(dev))
2353 		return NOTIFY_DONE;
2354 
2355 	slave = vlan_dev_real_dev(dev);
2356 	if (!dsa_slave_dev_check(slave))
2357 		return NOTIFY_DONE;
2358 
2359 	dp = dsa_slave_to_port(slave);
2360 	br = dsa_port_bridge_dev_get(dp);
2361 	if (!br)
2362 		return NOTIFY_DONE;
2363 
2364 	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
2365 	if (br_vlan_enabled(br) &&
2366 	    netif_is_bridge_master(info->upper_dev) && info->linking) {
2367 		NL_SET_ERR_MSG_MOD(ext_ack,
2368 				   "Cannot enslave VLAN device into VLAN aware bridge");
2369 		return notifier_from_errno(-EINVAL);
2370 	}
2371 
2372 	return NOTIFY_DONE;
2373 }
2374 
2375 static int
2376 dsa_slave_check_8021q_upper(struct net_device *dev,
2377 			    struct netdev_notifier_changeupper_info *info)
2378 {
2379 	struct dsa_port *dp = dsa_slave_to_port(dev);
2380 	struct net_device *br = dsa_port_bridge_dev_get(dp);
2381 	struct bridge_vlan_info br_info;
2382 	struct netlink_ext_ack *extack;
2383 	int err = NOTIFY_DONE;
2384 	u16 vid;
2385 
2386 	if (!br || !br_vlan_enabled(br))
2387 		return NOTIFY_DONE;
2388 
2389 	extack = netdev_notifier_info_to_extack(&info->info);
2390 	vid = vlan_dev_vlan_id(info->upper_dev);
2391 
2392 	/* br_vlan_get_info() returns -EINVAL if the device is not found and
2393 	 * -ENOENT if the VID is not found. A return value of 0 means success,
2394 	 * which is a failure for us here.
2395 	 */
2396 	err = br_vlan_get_info(br, vid, &br_info);
2397 	if (err == 0) {
2398 		NL_SET_ERR_MSG_MOD(extack,
2399 				   "This VLAN is already configured by the bridge");
2400 		return notifier_from_errno(-EBUSY);
2401 	}
2402 
2403 	return NOTIFY_DONE;
2404 }
2405 
2406 static int
2407 dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
2408 				      struct netdev_notifier_changeupper_info *info)
2409 {
2410 	struct dsa_switch *ds;
2411 	struct dsa_port *dp;
2412 	int err;
2413 
2414 	if (!dsa_slave_dev_check(dev))
2415 		return dsa_prevent_bridging_8021q_upper(dev, info);
2416 
2417 	dp = dsa_slave_to_port(dev);
2418 	ds = dp->ds;
2419 
2420 	if (ds->ops->port_prechangeupper) {
2421 		err = ds->ops->port_prechangeupper(ds, dp->index, info);
2422 		if (err)
2423 			return notifier_from_errno(err);
2424 	}
2425 
2426 	if (is_vlan_dev(info->upper_dev))
2427 		return dsa_slave_check_8021q_upper(dev, info);
2428 
2429 	return NOTIFY_DONE;
2430 }
2431 
2432 static int dsa_slave_netdevice_event(struct notifier_block *nb,
2433 				     unsigned long event, void *ptr)
2434 {
2435 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2436 
2437 	switch (event) {
2438 	case NETDEV_PRECHANGEUPPER: {
2439 		struct netdev_notifier_changeupper_info *info = ptr;
2440 		int err;
2441 
2442 		err = dsa_slave_prechangeupper_sanity_check(dev, info);
2443 		if (err != NOTIFY_DONE)
2444 			return err;
2445 
2446 		if (dsa_slave_dev_check(dev))
2447 			return dsa_slave_prechangeupper(dev, ptr);
2448 
2449 		if (netif_is_lag_master(dev))
2450 			return dsa_slave_lag_prechangeupper(dev, ptr);
2451 
2452 		break;
2453 	}
2454 	case NETDEV_CHANGEUPPER:
2455 		if (dsa_slave_dev_check(dev))
2456 			return dsa_slave_changeupper(dev, ptr);
2457 
2458 		if (netif_is_lag_master(dev))
2459 			return dsa_slave_lag_changeupper(dev, ptr);
2460 
2461 		break;
2462 	case NETDEV_CHANGELOWERSTATE: {
2463 		struct netdev_notifier_changelowerstate_info *info = ptr;
2464 		struct dsa_port *dp;
2465 		int err;
2466 
2467 		if (!dsa_slave_dev_check(dev))
2468 			break;
2469 
2470 		dp = dsa_slave_to_port(dev);
2471 
2472 		err = dsa_port_lag_change(dp, info->lower_state_info);
2473 		return notifier_from_errno(err);
2474 	}
2475 	case NETDEV_CHANGE:
2476 	case NETDEV_UP: {
2477 		/* Track the state of the master port.
2478 		 * A DSA driver may require the master port (and indirectly
2479 		 * the tagger) to be available for certain special operations.
2480 		 */
2481 		if (netdev_uses_dsa(dev)) {
2482 			struct dsa_port *cpu_dp = dev->dsa_ptr;
2483 			struct dsa_switch_tree *dst = cpu_dp->ds->dst;
2484 
2485 			/* Track when the master port is UP */
2486 			dsa_tree_master_oper_state_change(dst, dev,
2487 							  netif_oper_up(dev));
2488 
2489 			/* Track when the master port is ready and can accept
2490 			 * packets.
2491 			 * The NETDEV_UP event is not enough to flag a port as ready.
2492 			 * We also have to wait for linkwatch_do_dev to dev_activate
2493 			 * and emit a NETDEV_CHANGE event.
2494 			 * We check whether a master port is ready by checking that
2495 			 * its dev has a qdisc assigned which is not the noop qdisc.
2496 			 */
2497 			dsa_tree_master_admin_state_change(dst, dev,
2498 							   !qdisc_tx_is_noop(dev));
2499 
2500 			return NOTIFY_OK;
2501 		}
2502 
2503 		return NOTIFY_DONE;
2504 	}
2505 	case NETDEV_GOING_DOWN: {
2506 		struct dsa_port *dp, *cpu_dp;
2507 		struct dsa_switch_tree *dst;
2508 		LIST_HEAD(close_list);
2509 
2510 		if (!netdev_uses_dsa(dev))
2511 			return NOTIFY_DONE;
2512 
2513 		cpu_dp = dev->dsa_ptr;
2514 		dst = cpu_dp->ds->dst;
2515 
2516 		dsa_tree_master_admin_state_change(dst, dev, false);
2517 
2518 		list_for_each_entry(dp, &dst->ports, list) {
2519 			if (!dsa_port_is_user(dp))
2520 				continue;
2521 
2522 			list_add(&dp->slave->close_list, &close_list);
2523 		}
2524 
2525 		dev_close_many(&close_list, true);
2526 
2527 		return NOTIFY_OK;
2528 	}
2529 	default:
2530 		break;
2531 	}
2532 
2533 	return NOTIFY_DONE;
2534 }
2535 
2536 static void
2537 dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
2538 {
2539 	struct switchdev_notifier_fdb_info info = {};
2540 
2541 	info.addr = switchdev_work->addr;
2542 	info.vid = switchdev_work->vid;
2543 	info.offloaded = true;
2544 	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
2545 				 switchdev_work->orig_dev, &info.info, NULL);
2546 }
2547 
2548 static void dsa_slave_switchdev_event_work(struct work_struct *work)
2549 {
2550 	struct dsa_switchdev_event_work *switchdev_work =
2551 		container_of(work, struct dsa_switchdev_event_work, work);
2552 	const unsigned char *addr = switchdev_work->addr;
2553 	struct net_device *dev = switchdev_work->dev;
2554 	u16 vid = switchdev_work->vid;
2555 	struct dsa_switch *ds;
2556 	struct dsa_port *dp;
2557 	int err;
2558 
2559 	dp = dsa_slave_to_port(dev);
2560 	ds = dp->ds;
2561 
2562 	switch (switchdev_work->event) {
2563 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2564 		if (switchdev_work->host_addr)
2565 			err = dsa_port_bridge_host_fdb_add(dp, addr, vid);
2566 		else if (dp->lag)
2567 			err = dsa_port_lag_fdb_add(dp, addr, vid);
2568 		else
2569 			err = dsa_port_fdb_add(dp, addr, vid);
2570 		if (err) {
2571 			dev_err(ds->dev,
2572 				"port %d failed to add %pM vid %d to fdb: %d\n",
2573 				dp->index, addr, vid, err);
2574 			break;
2575 		}
2576 		dsa_fdb_offload_notify(switchdev_work);
2577 		break;
2578 
2579 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2580 		if (switchdev_work->host_addr)
2581 			err = dsa_port_bridge_host_fdb_del(dp, addr, vid);
2582 		else if (dp->lag)
2583 			err = dsa_port_lag_fdb_del(dp, addr, vid);
2584 		else
2585 			err = dsa_port_fdb_del(dp, addr, vid);
2586 		if (err) {
2587 			dev_err(ds->dev,
2588 				"port %d failed to delete %pM vid %d from fdb: %d\n",
2589 				dp->index, addr, vid, err);
2590 		}
2591 
2592 		break;
2593 	}
2594 
2595 	kfree(switchdev_work);
2596 }
2597 
2598 static bool dsa_foreign_dev_check(const struct net_device *dev,
2599 				  const struct net_device *foreign_dev)
2600 {
2601 	const struct dsa_port *dp = dsa_slave_to_port(dev);
2602 	struct dsa_switch_tree *dst = dp->ds->dst;
2603 
2604 	if (netif_is_bridge_master(foreign_dev))
2605 		return !dsa_tree_offloads_bridge_dev(dst, foreign_dev);
2606 
2607 	if (netif_is_bridge_port(foreign_dev))
2608 		return !dsa_tree_offloads_bridge_port(dst, foreign_dev);
2609 
2610 	/* Everything else is foreign */
2611 	return true;
2612 }
2613 
2614 static int dsa_slave_fdb_event(struct net_device *dev,
2615 			       struct net_device *orig_dev,
2616 			       unsigned long event, const void *ctx,
2617 			       const struct switchdev_notifier_fdb_info *fdb_info)
2618 {
2619 	struct dsa_switchdev_event_work *switchdev_work;
2620 	struct dsa_port *dp = dsa_slave_to_port(dev);
2621 	bool host_addr = fdb_info->is_local;
2622 	struct dsa_switch *ds = dp->ds;
2623 
2624 	if (ctx && ctx != dp)
2625 		return 0;
2626 
2627 	if (switchdev_fdb_is_dynamically_learned(fdb_info)) {
2628 		if (dsa_port_offloads_bridge_port(dp, orig_dev))
2629 			return 0;
2630 
2631 		/* FDB entries learned by the software bridge or by foreign
2632 		 * bridge ports should be installed as host addresses only if
2633 		 * the driver requests assisted learning.
2634 		 */
2635 		if (!ds->assisted_learning_on_cpu_port)
2636 			return 0;
2637 	}
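
	/* For illustration only: a driver opts into assisted learning from
	 * its setup callback; a hypothetical sketch with a made-up name:
	 *
	 *	static int foo_setup(struct dsa_switch *ds)
	 *	{
	 *		ds->assisted_learning_on_cpu_port = true;
	 *		return 0;
	 *	}
	 */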
2638 
2639 	/* Also treat FDB entries on foreign interfaces bridged with us as host
2640 	 * addresses.
2641 	 */
2642 	if (dsa_foreign_dev_check(dev, orig_dev))
2643 		host_addr = true;
2644 
2645 	/* Check early that we're not doing work in vain.
2646 	 * Host addresses on LAG ports still require regular FDB ops,
2647 	 * since the CPU port isn't in a LAG.
2648 	 */
2649 	if (dp->lag && !host_addr) {
2650 		if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del)
2651 			return -EOPNOTSUPP;
2652 	} else {
2653 		if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
2654 			return -EOPNOTSUPP;
2655 	}
2656 
2657 	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
2658 	if (!switchdev_work)
2659 		return -ENOMEM;
2660 
2661 	netdev_dbg(dev, "%s FDB entry towards %s, addr %pM vid %d%s\n",
2662 		   event == SWITCHDEV_FDB_ADD_TO_DEVICE ? "Adding" : "Deleting",
2663 		   orig_dev->name, fdb_info->addr, fdb_info->vid,
2664 		   host_addr ? " as host address" : "");
2665 
2666 	INIT_WORK(&switchdev_work->work, dsa_slave_switchdev_event_work);
2667 	switchdev_work->event = event;
2668 	switchdev_work->dev = dev;
2669 	switchdev_work->orig_dev = orig_dev;
2670 
2671 	ether_addr_copy(switchdev_work->addr, fdb_info->addr);
2672 	switchdev_work->vid = fdb_info->vid;
2673 	switchdev_work->host_addr = host_addr;
2674 
2675 	dsa_schedule_work(&switchdev_work->work);
2676 
2677 	return 0;
2678 }
2679 
2680 /* Called under rcu_read_lock() */
2681 static int dsa_slave_switchdev_event(struct notifier_block *unused,
2682 				     unsigned long event, void *ptr)
2683 {
2684 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2685 	int err;
2686 
2687 	switch (event) {
2688 	case SWITCHDEV_PORT_ATTR_SET:
2689 		err = switchdev_handle_port_attr_set(dev, ptr,
2690 						     dsa_slave_dev_check,
2691 						     dsa_slave_port_attr_set);
2692 		return notifier_from_errno(err);
2693 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2694 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2695 		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
2696 							   dsa_slave_dev_check,
2697 							   dsa_foreign_dev_check,
2698 							   dsa_slave_fdb_event);
2699 		return notifier_from_errno(err);
2700 	default:
2701 		return NOTIFY_DONE;
2702 	}
2703 
2704 	return NOTIFY_OK;
2705 }
2706 
2707 static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
2708 					      unsigned long event, void *ptr)
2709 {
2710 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2711 	int err;
2712 
2713 	switch (event) {
2714 	case SWITCHDEV_PORT_OBJ_ADD:
2715 		err = switchdev_handle_port_obj_add_foreign(dev, ptr,
2716 							    dsa_slave_dev_check,
2717 							    dsa_foreign_dev_check,
2718 							    dsa_slave_port_obj_add);
2719 		return notifier_from_errno(err);
2720 	case SWITCHDEV_PORT_OBJ_DEL:
2721 		err = switchdev_handle_port_obj_del_foreign(dev, ptr,
2722 							    dsa_slave_dev_check,
2723 							    dsa_foreign_dev_check,
2724 							    dsa_slave_port_obj_del);
2725 		return notifier_from_errno(err);
2726 	case SWITCHDEV_PORT_ATTR_SET:
2727 		err = switchdev_handle_port_attr_set(dev, ptr,
2728 						     dsa_slave_dev_check,
2729 						     dsa_slave_port_attr_set);
2730 		return notifier_from_errno(err);
2731 	}
2732 
2733 	return NOTIFY_DONE;
2734 }
2735 
2736 static struct notifier_block dsa_slave_nb __read_mostly = {
2737 	.notifier_call  = dsa_slave_netdevice_event,
2738 };
2739 
2740 struct notifier_block dsa_slave_switchdev_notifier = {
2741 	.notifier_call = dsa_slave_switchdev_event,
2742 };
2743 
2744 struct notifier_block dsa_slave_switchdev_blocking_notifier = {
2745 	.notifier_call = dsa_slave_switchdev_blocking_event,
2746 };
2747 
2748 int dsa_slave_register_notifier(void)
2749 {
2750 	struct notifier_block *nb;
2751 	int err;
2752 
2753 	err = register_netdevice_notifier(&dsa_slave_nb);
2754 	if (err)
2755 		return err;
2756 
2757 	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
2758 	if (err)
2759 		goto err_switchdev_nb;
2760 
2761 	nb = &dsa_slave_switchdev_blocking_notifier;
2762 	err = register_switchdev_blocking_notifier(nb);
2763 	if (err)
2764 		goto err_switchdev_blocking_nb;
2765 
2766 	return 0;
2767 
2768 err_switchdev_blocking_nb:
2769 	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
2770 err_switchdev_nb:
2771 	unregister_netdevice_notifier(&dsa_slave_nb);
2772 	return err;
2773 }
2774 
2775 void dsa_slave_unregister_notifier(void)
2776 {
2777 	struct notifier_block *nb;
2778 	int err;
2779 
2780 	nb = &dsa_slave_switchdev_blocking_notifier;
2781 	err = unregister_switchdev_blocking_notifier(nb);
2782 	if (err)
2783 		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
2784 
2785 	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
2786 	if (err)
2787 		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
2788 
2789 	err = unregister_netdevice_notifier(&dsa_slave_nb);
2790 	if (err)
2791 		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
2792 }
2793