// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/selftests.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/if_hsr.h>
#include <linux/netpoll.h>

#include "dsa_priv.h"

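/* Deferred work for keeping the standalone ports' host addresses in sync
 * with the switch. The address lists are updated from atomic context, so the
 * actual FDB/MDB programming, which may sleep, is done from the DSA ordered
 * workqueue.
 */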
static void dsa_slave_standalone_event_work(struct work_struct *work)
{
	struct dsa_standalone_event_work *standalone_work =
		container_of(work, struct dsa_standalone_event_work, work);
	const unsigned char *addr = standalone_work->addr;
	struct net_device *dev = standalone_work->dev;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_mdb mdb;
	struct dsa_switch *ds = dp->ds;
	u16 vid = standalone_work->vid;
	int err;

	switch (standalone_work->event) {
	case DSA_UC_ADD:
		err = dsa_port_standalone_host_fdb_add(dp, addr, vid);
		if (err)
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, addr, vid, err);
		break;
	case DSA_UC_DEL:
		err = dsa_port_standalone_host_fdb_del(dp, addr, vid);
		if (err)
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, addr, vid, err);
		break;
	case DSA_MC_ADD:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_add(dp, &mdb);
		if (err)
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to mdb: %d\n",
				dp->index, addr, vid, err);
		break;
	case DSA_MC_DEL:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_del(dp, &mdb);
		if (err)
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from mdb: %d\n",
				dp->index, addr, vid, err);
		break;
	}

	kfree(standalone_work);
}

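/* May be called from atomic context (e.g. under the master's addr_list_lock
 * via the address sync callbacks), hence the GFP_ATOMIC allocation.
 */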
static int dsa_slave_schedule_standalone_work(struct net_device *dev,
					      enum dsa_standalone_event event,
					      const unsigned char *addr,
					      u16 vid)
{
	struct dsa_standalone_event_work *standalone_work;

	standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC);
	if (!standalone_work)
		return -ENOMEM;

	INIT_WORK(&standalone_work->work, dsa_slave_standalone_event_work);
	standalone_work->event = event;
	standalone_work->dev = dev;

	ether_addr_copy(standalone_work->addr, addr);
	standalone_work->vid = vid;

	dsa_schedule_work(&standalone_work->work);

	return 0;
}

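/* Callbacks for __dev_uc_sync() and __dev_mc_sync(): translate address list
 * updates on the user port into deferred standalone host FDB/MDB operations.
 */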
static int dsa_slave_sync_uc(struct net_device *dev,
			     const unsigned char *addr)
{
	return dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
}

static int dsa_slave_unsync_uc(struct net_device *dev,
			       const unsigned char *addr)
{
	return dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
}

static int dsa_slave_sync_mc(struct net_device *dev,
			     const unsigned char *addr)
{
	return dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
}

static int dsa_slave_unsync_mc(struct net_device *dev,
			       const unsigned char *addr)
{
	return dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
}

/* slave mii_bus handling ***************************************************/
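/* The slave MII bus is a proxy that only exposes the PHYs internal to the
 * switch, via the driver's own phy_read/phy_write accessors. Addresses
 * outside ds->phys_mii_mask read back as 0xffff, i.e. no device present.
 */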
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}

static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}

void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->index, ds->index);
	ds->slave_mii_bus->parent = ds->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}

/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
	return dsa_slave_to_master(dev)->ifindex;
}

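/* Bring up the user port. The master must be opened first, since all frames
 * pass through it. The port's MAC address is installed in the switch's
 * standalone host FDB when supported, and as a secondary unicast address on
 * the master when it differs from the master's own; both are rolled back on
 * failure.
 */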
static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int err;

	err = dev_open(master, NULL);
	if (err < 0) {
		netdev_err(dev, "failed to open master %s\n", master->name);
		goto out;
	}

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0);
		if (err)
			goto out;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto del_host_addr;
	}

	err = dsa_port_enable_rt(dp, dev->phydev);
	if (err)
		goto del_unicast;

	return 0;

del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
del_host_addr:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
out:
	return err;
}

static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	dsa_port_disable_rt(dp);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

	return 0;
}

/* Keep flooding enabled towards this port's CPU port as long as it serves at
 * least one port in the tree that requires it.
 */
static void dsa_port_manage_cpu_flood(struct dsa_port *dp)
{
	struct switchdev_brport_flags flags = {
		.mask = BR_FLOOD | BR_MCAST_FLOOD,
	};
	struct dsa_switch_tree *dst = dp->ds->dst;
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_port *other_dp;
	int err;

	list_for_each_entry(other_dp, &dst->ports, list) {
		if (!dsa_port_is_user(other_dp))
			continue;

		if (other_dp->cpu_dp != cpu_dp)
			continue;

		if (other_dp->slave->flags & IFF_ALLMULTI)
			flags.val |= BR_MCAST_FLOOD;
		if (other_dp->slave->flags & IFF_PROMISC)
			flags.val |= BR_FLOOD;
	}

	err = dsa_port_pre_bridge_flags(dp, flags, NULL);
	if (err)
		return;

	dsa_port_bridge_flags(cpu_dp, flags, NULL);
}

static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(master,
				 dev->flags & IFF_ALLMULTI ? 1 : -1);
	if (change & IFF_PROMISC)
		dev_set_promiscuity(master,
				    dev->flags & IFF_PROMISC ? 1 : -1);

	if (dsa_switch_supports_uc_filtering(ds) &&
	    dsa_switch_supports_mc_filtering(ds))
		dsa_port_manage_cpu_flood(dp);
}

static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	dev_mc_sync(master, dev);
	dev_uc_sync(master, dev);
	if (dsa_switch_supports_mc_filtering(ds))
		__dev_mc_sync(dev, dsa_slave_sync_mc, dsa_slave_unsync_mc);
	if (dsa_switch_supports_uc_filtering(ds))
		__dev_uc_sync(dev, dsa_slave_sync_uc, dsa_slave_unsync_uc);
}

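/* Change the port's MAC address using a make-before-break sequence: the new
 * address is installed (host FDB and/or master unicast filter) before the
 * old one is removed, so the port never stops receiving on either address.
 */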
static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
		if (err)
			return err;
	}

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			goto del_unicast;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

	eth_hw_addr_set(dev, addr->sa_data);

	return 0;

del_unicast:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0);

	return err;
}

struct dsa_slave_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

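/* Dump this port's hardware FDB. Entries are reported by the switch driver
 * through the dsa_slave_port_fdb_do_dump() callback, which packs them into
 * RTM_NEWNEIGH netlink messages.
 */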
static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}

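/* switchdev port attribute handler: offloads STP state, VLAN filtering,
 * ageing time and bridge port flags to the switch driver, but only for
 * attributes targeting a bridge (port) that this port actually offloads.
 */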
static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
				   const struct switchdev_attr *attr,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	if (ctx && ctx != dp)
		return 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_state(dp, attr->u.stp_state, true);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
						extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/* Must be called under rcu_read_lock() */
static int
dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
		u16 vid;

		if (!is_vlan_dev(upper_dev))
			continue;

		vid = vlan_dev_vlan_id(upper_dev);
		if (vid == vlan->vid)
			return -EBUSY;
	}

	return 0;
}

static int dsa_slave_vlan_add(struct net_device *dev,
			      const struct switchdev_obj *obj,
			      struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;
	int err;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
	 * the same VID.
	 */
	if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
		rcu_read_lock();
		err = dsa_slave_vlan_check_for_8021q_uppers(dev, vlan);
		rcu_read_unlock();
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Port already has a VLAN upper with this VID");
			return err;
		}
	}

	return dsa_port_vlan_add(dp, vlan, extack);
}

/* Offload a VLAN installed on the bridge or on a foreign interface by
 * installing it as a VLAN towards the CPU port.
 */
static int dsa_slave_host_vlan_add(struct net_device *dev,
				   const struct switchdev_obj *obj,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Even though drivers often handle CPU membership in special ways,
	 * it doesn't make sense to program a PVID, so clear this flag.
	 */
	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;

	return dsa_port_host_vlan_add(dp, &vlan, extack);
}

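/* switchdev object handler: offloads port and host MDB entries, bridge VLANs
 * and MRP objects, dispatching each to the appropriate dsa_port_* helper.
 */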
static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_add(dev, obj, extack);
		else
			err = dsa_slave_host_vlan_add(dev, obj, extack);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_vlan_del(struct net_device *dev,
			      const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_vlan_del(dp, vlan);
}

static int dsa_slave_host_vlan_del(struct net_device *dev,
				   const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_host_vlan_del(dp, vlan);
}

static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_del(dev, obj);
		else
			err = dsa_slave_host_vlan_del(dev, obj);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	return netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
	return NETDEV_TX_OK;
#endif
}

static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return;

	if (!ds->ops->port_txtstamp)
		return;

	ds->ops->port_txtstamp(ds, p->dp->index, skb);
}

netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
	/* SKBs for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, skb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	skb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);

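/* Ensure the skb has enough headroom and tailroom for the tagger to insert
 * its header or trailer without triggering another reallocation in the
 * transmit hot path.
 */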
static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
{
	int needed_headroom = dev->needed_headroom;
	int needed_tailroom = dev->needed_tailroom;

	/* For tail taggers, we need to pad short frames ourselves, to ensure
	 * that the tail tag does not fail at its role of being at the end of
	 * the packet, once the master interface pads the frame. Account for
	 * that pad length here, and pad later.
	 */
	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
		needed_tailroom += ETH_ZLEN - skb->len;
	/* skb_headroom() returns unsigned int... */
	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);

	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
		/* No reallocation needed, yay! */
		return 0;

	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
				GFP_ATOMIC);
}

static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct sk_buff *nskb;

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	memset(skb->cb, 0, sizeof(skb->cb));

	/* Handle tx timestamp if any */
	dsa_skb_tx_timestamp(p, skb);

	if (dsa_realloc_skb(skb, dev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* needed_tailroom should still be 'warm' in the cache line from
	 * dsa_realloc_skb(), which has also ensured that padding is safe.
	 */
	if (dev->needed_tailroom)
		eth_skb_pad(skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	return dsa_enqueue_skb(nskb, dev);
}

/* ethtool operations *******************************************************/

static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}

static int dsa_slave_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}

static int dsa_slave_nway_reset(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_nway_reset(dp->pl);
}

static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}

static int dsa_slave_get_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	} else if (stringset == ETH_SS_TEST) {
		net_selftest_get_strings(data);
	}
}

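/* The first four stats slots are software tx/rx packet and byte counters,
 * aggregated across all CPUs from dev->tstats; hardware counters reported by
 * the switch driver follow at data[4].
 */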
static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(dev->tstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}

static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count = 0;

		if (ds->ops->get_sset_count) {
			count = ds->ops->get_sset_count(ds, dp->index, sset);
			if (count < 0)
				return count;
		}

		return count + 4;
	} else if (sset == ETH_SS_TEST) {
		return net_selftest_get_count();
	}

	return -EOPNOTSUPP;
}

static void dsa_slave_get_eth_phy_stats(struct net_device *dev,
					struct ethtool_eth_phy_stats *phy_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_phy_stats)
		ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
}

static void dsa_slave_get_eth_mac_stats(struct net_device *dev,
					struct ethtool_eth_mac_stats *mac_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_mac_stats)
		ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats);
}

static void
dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
			     struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_ctrl_stats)
		ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
}

static void dsa_slave_net_selftest(struct net_device *ndev,
				   struct ethtool_test *etest, u64 *buf)
{
	struct dsa_port *dp = dsa_slave_to_port(ndev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->self_test) {
		ds->ops->self_test(ds, dp->index, etest, buf);
		return;
	}

	net_selftest(ndev, etest, buf);
}

static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	phylink_ethtool_get_wol(dp->pl, w);

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	phylink_ethtool_set_wol(dp->pl, w);

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}

static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}

static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}

static int dsa_slave_get_link_ksettings(struct net_device *dev,
					struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_get(dp->pl, cmd);
}

static int dsa_slave_set_link_ksettings(struct net_device *dev,
					const struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_set(dp->pl, cmd);
}

static void dsa_slave_get_pauseparam(struct net_device *dev,
				     struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	phylink_ethtool_get_pauseparam(dp->pl, pause);
}

static int dsa_slave_set_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_set_pauseparam(dp->pl, pause);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif

static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_mirror_tc_entry *mirror;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	struct dsa_port *to_dp;
	int err;

	if (!ds->ops->port_mirror_add)
		return -EOPNOTSUPP;

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	act = &cls->rule->action.entries[0];

	if (!act->dev)
		return -EINVAL;

	if (!dsa_slave_dev_check(act->dev))
		return -EOPNOTSUPP;

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
	mirror = &mall_tc_entry->mirror;

	to_dp = dsa_slave_to_port(act->dev);

	mirror->to_local_port = to_dp->index;
	mirror->ingress = ingress;

	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int
dsa_slave_add_cls_matchall_police(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_policer_tc_entry *policer;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	int err;

	if (!ds->ops->port_policer_add) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Policing offload not implemented");
		return -EOPNOTSUPP;
	}

	if (!ingress) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only supported on ingress qdisc");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one port policer allowed");
			return -EEXIST;
		}
	}

	act = &cls->rule->action.entries[0];

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
	policer = &mall_tc_entry->policer;
	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
	policer->burst = act->police.burst;

	err = ds->ops->port_policer_add(ds, dp->index, policer);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int dsa_slave_add_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls,
				      bool ingress)
{
	int err = -EOPNOTSUPP;

	if (cls->common.protocol == htons(ETH_P_ALL) &&
	    flow_offload_has_one_action(&cls->rule->action) &&
	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
		err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
	else if (flow_offload_has_one_action(&cls->rule->action) &&
		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
		err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);

	return err;
}

static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		if (ds->ops->port_mirror_del)
			ds->ops->port_mirror_del(ds, dp->index,
						 &mall_tc_entry->mirror);
		break;
	case DSA_PORT_MALL_POLICER:
		if (ds->ops->port_policer_del)
			ds->ops->port_policer_del(ds, dp->index);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls,
					   bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_add_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_add)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_add(ds, port, cls, ingress);
}

static int dsa_slave_del_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_del)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_del(ds, port, cls, ingress);
}

static int dsa_slave_stats_cls_flower(struct net_device *dev,
				      struct flow_cls_offload *cls,
				      bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_stats)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
}

static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
					 struct flow_cls_offload *cls,
					 bool ingress)
{
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		return dsa_slave_add_cls_flower(dev, cls, ingress);
	case FLOW_CLS_DESTROY:
		return dsa_slave_del_cls_flower(dev, cls, ingress);
	case FLOW_CLS_STATS:
		return dsa_slave_stats_cls_flower(dev, cls, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				       void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
	case TC_SETUP_CLSFLOWER:
		return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static LIST_HEAD(dsa_slave_block_cb_list);

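/* Bind flow block (matchall/flower classifier) offload to this port: ingress
 * and egress qdiscs get separate block callbacks, which dispatch the offload
 * requests to the switch driver.
 */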
static int dsa_slave_setup_tc_block(struct net_device *dev,
				    struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_slave_setup_tc_block_cb_ig;
	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_slave_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	f->driver_block_list = &dsa_slave_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_ft_block(struct dsa_switch *ds, int port,
				    void *type_data)
{
	struct dsa_port *cpu_dp = dsa_to_port(ds, port)->cpu_dp;
	struct net_device *master = cpu_dp->master;

	if (!master->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return master->netdev_ops->ndo_setup_tc(master, TC_SETUP_FT, type_data);
}

static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	switch (type) {
	case TC_SETUP_BLOCK:
		return dsa_slave_setup_tc_block(dev, type_data);
	case TC_SETUP_FT:
		return dsa_slave_setup_ft_block(ds, dp->index, type_data);
	default:
		break;
	}

	if (!ds->ops->port_setup_tc)
		return -EOPNOTSUPP;

	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
}

static int dsa_slave_get_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_slave_set_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static int dsa_slave_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *ts)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;

	if (!ds->ops->get_ts_info)
		return -EOPNOTSUPP;

	return ds->ops->get_ts_info(ds, p->dp->index, ts);
}

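/* Install a VLAN from the 8021q module on both the user port and the CPU
 * port, so that tagged traffic can flow in both directions. Invoked through
 * NETIF_F_HW_VLAN_CTAG_FILTER when an 8021q upper is created, e.g. with
 * "ip link add link swp0 name swp0.100 type vlan id 100".
 */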
static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
				     u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	struct netlink_ext_ack extack = {0};
	int ret;

	/* User port... */
	ret = dsa_port_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "%s\n", extack._msg);
		return ret;
	}

	/* And CPU port... */
	ret = dsa_port_host_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
				   extack._msg);
		return ret;
	}

	return 0;
}

static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
				      u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	int err;

	err = dsa_port_vlan_del(dp, &vlan);
	if (err)
		return err;

	return dsa_port_host_vlan_del(dp, &vlan);
}

static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);

	return dsa_slave_vlan_rx_add_vid(arg, proto, vid);
}

static int dsa_slave_clear_vlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);

	return dsa_slave_vlan_rx_kill_vid(arg, proto, vid);
}

/* Keep the VLAN RX filtering list in sync with the hardware only if VLAN
 * filtering is enabled. The baseline is that only ports that offload a
 * VLAN-aware bridge are VLAN-aware, and standalone ports are VLAN-unaware,
 * but there are exceptions for quirky hardware.
 *
 * If ds->vlan_filtering_is_global = true, then standalone ports which share
 * the same switch with other ports that offload a VLAN-aware bridge are also
 * inevitably VLAN-aware.
 *
 * To summarize, a DSA switch port offloads:
 *
 * - If standalone (this includes software bridge, software LAG):
 *     - if ds->needs_standalone_vlan_filtering = true, OR if
 *       (ds->vlan_filtering_is_global = true AND there are bridges spanning
 *       this switch chip which have vlan_filtering=1)
 *         - the 8021q upper VLANs
 *     - else (standalone VLAN filtering is not needed, VLAN filtering is not
 *       global, or it is, but no port is under a VLAN-aware bridge):
 *         - no VLAN (any 8021q upper is a software VLAN)
 *
 * - If under a vlan_filtering=0 bridge which it offloads:
 *     - if ds->configure_vlan_while_not_filtering = true (default):
 *         - the bridge VLANs. These VLANs are committed to hardware but inactive.
 *     - else (deprecated):
 *         - no VLAN. The bridge VLANs are not restored when VLAN awareness is
 *           enabled, so this behavior is broken and discouraged.
 *
 * - If under a vlan_filtering=1 bridge which it offloads:
 *     - the bridge VLANs
 *     - the 8021q upper VLANs
 */
int dsa_slave_manage_vlan_filtering(struct net_device *slave,
				    bool vlan_filtering)
{
	int err;

	if (vlan_filtering) {
		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

		err = vlan_for_each(slave, dsa_slave_restore_vlan, slave);
		if (err) {
			vlan_for_each(slave, dsa_slave_clear_vlan, slave);
			slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
			return err;
		}
	} else {
		err = vlan_for_each(slave, dsa_slave_clear_vlan, slave);
		if (err)
			return err;

		slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	return 0;
}

struct dsa_hw_port {
	struct list_head list;
	struct net_device *dev;
	int old_mtu;
};

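/* Apply @mtu to every port in @hw_port_list, restoring each port's old MTU
 * if any of the changes fails.
 */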
static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
{
	const struct dsa_hw_port *p;
	int err;

	list_for_each_entry(p, hw_port_list, list) {
		if (p->dev->mtu == mtu)
			continue;

		err = dev_set_mtu(p->dev, mtu);
		if (err)
			goto rollback;
	}

	return 0;

rollback:
	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
		if (p->dev->mtu == p->old_mtu)
			continue;

		if (dev_set_mtu(p->dev, p->old_mtu))
			netdev_err(p->dev, "Failed to restore MTU\n");
	}

	return err;
}

static void dsa_hw_port_list_free(struct list_head *hw_port_list)
{
	struct dsa_hw_port *p, *n;

	list_for_each_entry_safe(p, n, hw_port_list, list)
		kfree(p);
}

/* Make the hardware datapath to/from @dev limited to a common MTU */
static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
{
	struct list_head hw_port_list;
	struct dsa_switch_tree *dst;
	int min_mtu = ETH_MAX_MTU;
	struct dsa_port *other_dp;
	int err;

	if (!dp->ds->mtu_enforcement_ingress)
		return;

	if (!dp->bridge)
		return;

	INIT_LIST_HEAD(&hw_port_list);

	/* Populate the list of ports that are part of the same bridge
	 * as the newly added/modified port
	 */
	list_for_each_entry(dst, &dsa_tree_list, list) {
		list_for_each_entry(other_dp, &dst->ports, list) {
			struct dsa_hw_port *hw_port;
			struct net_device *slave;

			if (other_dp->type != DSA_PORT_TYPE_USER)
				continue;

			if (!dsa_port_bridge_same(dp, other_dp))
				continue;

			if (!other_dp->ds->mtu_enforcement_ingress)
				continue;

			slave = other_dp->slave;

			if (min_mtu > slave->mtu)
				min_mtu = slave->mtu;

			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
			if (!hw_port)
				goto out;

			hw_port->dev = slave;
			hw_port->old_mtu = slave->mtu;

			list_add(&hw_port->list, &hw_port_list);
		}
	}

	/* Attempt to configure the entire hardware bridge to the newly added
	 * interface's MTU first, regardless of whether the intention of the
	 * user was to raise or lower it.
	 */
	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
	if (!err)
		goto out;

	/* Clearly that didn't work out so well, so just set the minimum MTU on
	 * all hardware bridge ports now. If this fails too, then all ports will
	 * still have their old MTU rolled back anyway.
	 */
	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);

out:
	dsa_hw_port_list_free(&hw_port_list);
}

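/* Changing a user port's MTU may require growing the MTU of the master and
 * of the CPU port to the largest user port MTU plus the tagging overhead:
 * e.g. with an 8-byte EDSA tag and a largest user MTU of 1500, the master
 * must accept an MTU of 1508. The change is applied to the master first,
 * then to the CPU port, then to the user port, with rollback on failure.
 */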
1754 int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
1755 {
1756 	struct net_device *master = dsa_slave_to_master(dev);
1757 	struct dsa_port *dp = dsa_slave_to_port(dev);
1758 	struct dsa_slave_priv *p = netdev_priv(dev);
1759 	struct dsa_switch *ds = p->dp->ds;
1760 	struct dsa_port *dp_iter;
1761 	struct dsa_port *cpu_dp;
1762 	int port = p->dp->index;
1763 	int largest_mtu = 0;
1764 	int new_master_mtu;
1765 	int old_master_mtu;
1766 	int mtu_limit;
1767 	int cpu_mtu;
1768 	int err;
1769 
1770 	if (!ds->ops->port_change_mtu)
1771 		return -EOPNOTSUPP;
1772 
1773 	list_for_each_entry(dp_iter, &ds->dst->ports, list) {
1774 		int slave_mtu;
1775 
1776 		if (!dsa_port_is_user(dp_iter))
1777 			continue;
1778 
1779 		/* During probe, this function will be called for each slave
1780 		 * device, while not all of them have been allocated. That's
1781 		 * ok, it doesn't change what the maximum is, so ignore it.
1782 		 */
1783 		if (!dp_iter->slave)
1784 			continue;
1785 
1786 		/* Pretend that we already applied the setting, which we
1787 		 * actually haven't (still haven't done all integrity checks)
1788 		 */
1789 		if (dp_iter == dp)
1790 			slave_mtu = new_mtu;
1791 		else
1792 			slave_mtu = dp_iter->slave->mtu;
1793 
1794 		if (largest_mtu < slave_mtu)
1795 			largest_mtu = slave_mtu;
1796 	}
1797 
1798 	cpu_dp = dsa_to_port(ds, port)->cpu_dp;
1799 
1800 	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
1801 	old_master_mtu = master->mtu;
1802 	new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
1803 	if (new_master_mtu > mtu_limit)
1804 		return -ERANGE;
1805 
1806 	/* If the master MTU isn't over limit, there's no need to check the CPU
1807 	 * MTU, since that surely isn't either.
1808 	 */
1809 	cpu_mtu = largest_mtu;
1810 
1811 	/* Start applying stuff */
1812 	if (new_master_mtu != old_master_mtu) {
1813 		err = dev_set_mtu(master, new_master_mtu);
1814 		if (err < 0)
1815 			goto out_master_failed;
1816 
1817 		/* We only need to propagate the MTU of the CPU port to
1818 		 * upstream switches, so create a non-targeted notifier which
1819 		 * updates all switches.
1820 		 */
1821 		err = dsa_port_mtu_change(cpu_dp, cpu_mtu, false);
1822 		if (err)
1823 			goto out_cpu_failed;
1824 	}
1825 
1826 	err = dsa_port_mtu_change(dp, new_mtu, true);
1827 	if (err)
1828 		goto out_port_failed;
1829 
1830 	dev->mtu = new_mtu;
1831 
1832 	dsa_bridge_mtu_normalization(dp);
1833 
1834 	return 0;
1835 
1836 out_port_failed:
1837 	if (new_master_mtu != old_master_mtu)
1838 		dsa_port_mtu_change(cpu_dp, old_master_mtu -
1839 				    dsa_tag_protocol_overhead(cpu_dp->tag_ops),
1840 				    false);
1841 out_cpu_failed:
1842 	if (new_master_mtu != old_master_mtu)
1843 		dev_set_mtu(master, old_master_mtu);
1844 out_master_failed:
1845 	return err;
1846 }
1847 
1848 static const struct ethtool_ops dsa_slave_ethtool_ops = {
1849 	.get_drvinfo		= dsa_slave_get_drvinfo,
1850 	.get_regs_len		= dsa_slave_get_regs_len,
1851 	.get_regs		= dsa_slave_get_regs,
1852 	.nway_reset		= dsa_slave_nway_reset,
1853 	.get_link		= ethtool_op_get_link,
1854 	.get_eeprom_len		= dsa_slave_get_eeprom_len,
1855 	.get_eeprom		= dsa_slave_get_eeprom,
1856 	.set_eeprom		= dsa_slave_set_eeprom,
1857 	.get_strings		= dsa_slave_get_strings,
1858 	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
1859 	.get_sset_count		= dsa_slave_get_sset_count,
1860 	.get_eth_phy_stats	= dsa_slave_get_eth_phy_stats,
1861 	.get_eth_mac_stats	= dsa_slave_get_eth_mac_stats,
1862 	.get_eth_ctrl_stats	= dsa_slave_get_eth_ctrl_stats,
1863 	.set_wol		= dsa_slave_set_wol,
1864 	.get_wol		= dsa_slave_get_wol,
1865 	.set_eee		= dsa_slave_set_eee,
1866 	.get_eee		= dsa_slave_get_eee,
1867 	.get_link_ksettings	= dsa_slave_get_link_ksettings,
1868 	.set_link_ksettings	= dsa_slave_set_link_ksettings,
1869 	.get_pauseparam		= dsa_slave_get_pauseparam,
1870 	.set_pauseparam		= dsa_slave_set_pauseparam,
1871 	.get_rxnfc		= dsa_slave_get_rxnfc,
1872 	.set_rxnfc		= dsa_slave_set_rxnfc,
1873 	.get_ts_info		= dsa_slave_get_ts_info,
1874 	.self_test		= dsa_slave_net_selftest,
1875 };
1876 
1877 static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
1878 {
1879 	struct dsa_port *dp = dsa_slave_to_port(dev);
1880 
1881 	return &dp->devlink_port;
1882 }
1883 
1884 static void dsa_slave_get_stats64(struct net_device *dev,
1885 				  struct rtnl_link_stats64 *s)
1886 {
1887 	struct dsa_port *dp = dsa_slave_to_port(dev);
1888 	struct dsa_switch *ds = dp->ds;
1889 
1890 	if (ds->ops->get_stats64)
1891 		ds->ops->get_stats64(ds, dp->index, s);
1892 	else
1893 		dev_get_tstats64(dev, s);
1894 }
1895 
1896 static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx,
1897 				       struct net_device_path *path)
1898 {
1899 	struct dsa_port *dp = dsa_slave_to_port(ctx->dev);
1900 	struct dsa_port *cpu_dp = dp->cpu_dp;
1901 
1902 	path->dev = ctx->dev;
1903 	path->type = DEV_PATH_DSA;
1904 	path->dsa.proto = cpu_dp->tag_ops->proto;
1905 	path->dsa.port = dp->index;
1906 	ctx->dev = cpu_dp->master;
1907 
1908 	return 0;
1909 }
1910 
1911 static const struct net_device_ops dsa_slave_netdev_ops = {
1912 	.ndo_open	 	= dsa_slave_open,
1913 	.ndo_stop		= dsa_slave_close,
1914 	.ndo_start_xmit		= dsa_slave_xmit,
1915 	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
1916 	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
1917 	.ndo_set_mac_address	= dsa_slave_set_mac_address,
1918 	.ndo_fdb_dump		= dsa_slave_fdb_dump,
1919 	.ndo_eth_ioctl		= dsa_slave_ioctl,
1920 	.ndo_get_iflink		= dsa_slave_get_iflink,
1921 #ifdef CONFIG_NET_POLL_CONTROLLER
1922 	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
1923 	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
1924 	.ndo_poll_controller	= dsa_slave_poll_controller,
1925 #endif
1926 	.ndo_setup_tc		= dsa_slave_setup_tc,
1927 	.ndo_get_stats64	= dsa_slave_get_stats64,
1928 	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
1929 	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
1930 	.ndo_get_devlink_port	= dsa_slave_get_devlink_port,
1931 	.ndo_change_mtu		= dsa_slave_change_mtu,
1932 	.ndo_fill_forward_path	= dsa_slave_fill_forward_path,
1933 };
1934 
1935 static struct device_type dsa_type = {
1936 	.name	= "dsa",
1937 };
1938 
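/* Helper for switch drivers to report a link state change on a port to
 * phylink, e.g. from a link status interrupt handler.
 */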
1939 void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
1940 {
1941 	const struct dsa_port *dp = dsa_to_port(ds, port);
1942 
1943 	if (dp->pl)
1944 		phylink_mac_change(dp->pl, up);
1945 }
1946 EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
1947 
1948 static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
1949 					  struct phylink_link_state *state)
1950 {
1951 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1952 	struct dsa_switch *ds = dp->ds;
1953 
	/* No need to check that this operation is valid; the callback would
	 * not be called if it were not.
	 */
1957 	ds->ops->phylink_fixed_state(ds, dp->index, state);
1958 }
1959 
1960 /* slave device setup *******************************************************/
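/* Fallback used when no PHY could be connected through the device tree:
 * attach to a PHY at the given address on the switch's internal MDIO bus.
 */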
1961 static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr,
1962 				 u32 flags)
1963 {
1964 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1965 	struct dsa_switch *ds = dp->ds;
1966 
1967 	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
1968 	if (!slave_dev->phydev) {
1969 		netdev_err(slave_dev, "no phy at %d\n", addr);
1970 		return -ENODEV;
1971 	}
1972 
1973 	slave_dev->phydev->dev_flags |= flags;
1974 
1975 	return phylink_connect_phy(dp->pl, slave_dev->phydev);
1976 }
1977 
1978 static int dsa_slave_phy_setup(struct net_device *slave_dev)
1979 {
1980 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1981 	struct device_node *port_dn = dp->dn;
1982 	struct dsa_switch *ds = dp->ds;
1983 	u32 phy_flags = 0;
1984 	int ret;
1985 
1986 	dp->pl_config.dev = &slave_dev->dev;
1987 	dp->pl_config.type = PHYLINK_NETDEV;
1988 
1989 	/* The get_fixed_state callback takes precedence over polling the
1990 	 * link GPIO in PHYLINK (see phylink_get_fixed_state).  Only set
1991 	 * this if the switch provides such a callback.
1992 	 */
1993 	if (ds->ops->phylink_fixed_state) {
1994 		dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
1995 		dp->pl_config.poll_fixed_state = true;
1996 	}
1997 
1998 	ret = dsa_port_phylink_create(dp);
1999 	if (ret)
2000 		return ret;
2001 
2002 	if (ds->ops->get_phy_flags)
2003 		phy_flags = ds->ops->get_phy_flags(ds, dp->index);
2004 
2005 	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
2006 	if (ret == -ENODEV && ds->slave_mii_bus) {
		/* We could not connect to a designated PHY or SFP, so try to
		 * use the switch's internal MDIO bus instead.
		 */
2010 		ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
2011 	}
2012 	if (ret) {
2013 		netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
2014 			   ERR_PTR(ret));
2015 		phylink_destroy(dp->pl);
2016 	}
2017 
2018 	return ret;
2019 }
2020 
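/* Prepare the user port's netdev for the tagging protocol of its CPU
 * port: reserve the tagger's (and the master's) headroom and tailroom,
 * hook up the tagger's xmit method, and adjust feature flags accordingly.
 */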
2021 void dsa_slave_setup_tagger(struct net_device *slave)
2022 {
2023 	struct dsa_port *dp = dsa_slave_to_port(slave);
2024 	struct dsa_slave_priv *p = netdev_priv(slave);
2025 	const struct dsa_port *cpu_dp = dp->cpu_dp;
2026 	struct net_device *master = cpu_dp->master;
2027 	const struct dsa_switch *ds = dp->ds;
2028 
2029 	slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
2030 	slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
2031 	/* Try to save one extra realloc later in the TX path (in the master)
2032 	 * by also inheriting the master's needed headroom and tailroom.
2033 	 * The 8021q driver also does this.
2034 	 */
2035 	slave->needed_headroom += master->needed_headroom;
2036 	slave->needed_tailroom += master->needed_tailroom;
2037 
2038 	p->xmit = cpu_dp->tag_ops->xmit;
2039 
2040 	slave->features = master->vlan_features | NETIF_F_HW_TC;
2041 	slave->hw_features |= NETIF_F_HW_TC;
2042 	slave->features |= NETIF_F_LLTX;
2043 	if (slave->needed_tailroom)
2044 		slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
2045 	if (ds->needs_standalone_vlan_filtering)
2046 		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2047 }
2048 
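/* System suspend/resume hooks for user ports: detach/attach the net
 * device and stop/start its phylink instance under rtnl_lock.
 */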
2049 int dsa_slave_suspend(struct net_device *slave_dev)
2050 {
2051 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2052 
2053 	if (!netif_running(slave_dev))
2054 		return 0;
2055 
2056 	netif_device_detach(slave_dev);
2057 
2058 	rtnl_lock();
2059 	phylink_stop(dp->pl);
2060 	rtnl_unlock();
2061 
2062 	return 0;
2063 }
2064 
2065 int dsa_slave_resume(struct net_device *slave_dev)
2066 {
2067 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2068 
2069 	if (!netif_running(slave_dev))
2070 		return 0;
2071 
2072 	netif_device_attach(slave_dev);
2073 
2074 	rtnl_lock();
2075 	phylink_start(dp->pl);
2076 	rtnl_unlock();
2077 
2078 	return 0;
2079 }
2080 
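/* Allocate and register the net device for a user port: use the port's
 * MAC address if one was provided (inheriting the master's otherwise),
 * set up the tagger, the PHY and an initial MTU, then link the new
 * interface as an upper device of the master.
 */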
2081 int dsa_slave_create(struct dsa_port *port)
2082 {
2083 	const struct dsa_port *cpu_dp = port->cpu_dp;
2084 	struct net_device *master = cpu_dp->master;
2085 	struct dsa_switch *ds = port->ds;
2086 	const char *name = port->name;
2087 	struct net_device *slave_dev;
2088 	struct dsa_slave_priv *p;
2089 	int ret;
2090 
2091 	if (!ds->num_tx_queues)
2092 		ds->num_tx_queues = 1;
2093 
2094 	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
2095 				     NET_NAME_UNKNOWN, ether_setup,
2096 				     ds->num_tx_queues, 1);
2097 	if (slave_dev == NULL)
2098 		return -ENOMEM;
2099 
2100 	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
2101 	if (!is_zero_ether_addr(port->mac))
2102 		eth_hw_addr_set(slave_dev, port->mac);
2103 	else
2104 		eth_hw_addr_inherit(slave_dev, master);
2105 	slave_dev->priv_flags |= IFF_NO_QUEUE;
2106 	if (dsa_switch_supports_uc_filtering(ds))
2107 		slave_dev->priv_flags |= IFF_UNICAST_FLT;
2108 	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
2109 	if (ds->ops->port_max_mtu)
2110 		slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
2111 	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
2112 
2113 	SET_NETDEV_DEV(slave_dev, port->ds->dev);
2114 	slave_dev->dev.of_node = port->dn;
2115 	slave_dev->vlan_features = master->vlan_features;
2116 
2117 	p = netdev_priv(slave_dev);
2118 	slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2119 	if (!slave_dev->tstats) {
2120 		free_netdev(slave_dev);
2121 		return -ENOMEM;
2122 	}
2123 
2124 	ret = gro_cells_init(&p->gcells, slave_dev);
2125 	if (ret)
2126 		goto out_free;
2127 
2128 	p->dp = port;
2129 	INIT_LIST_HEAD(&p->mall_tc_list);
2130 	port->slave = slave_dev;
2131 	dsa_slave_setup_tagger(slave_dev);
2132 
2133 	netif_carrier_off(slave_dev);
2134 
2135 	ret = dsa_slave_phy_setup(slave_dev);
2136 	if (ret) {
2137 		netdev_err(slave_dev,
2138 			   "error %d setting up PHY for tree %d, switch %d, port %d\n",
2139 			   ret, ds->dst->index, ds->index, port->index);
2140 		goto out_gcells;
2141 	}
2142 
2143 	rtnl_lock();
2144 
2145 	ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
2146 	if (ret && ret != -EOPNOTSUPP)
2147 		dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
2148 			 ret, ETH_DATA_LEN, port->index);
2149 
2150 	ret = register_netdevice(slave_dev);
2151 	if (ret) {
2152 		netdev_err(master, "error %d registering interface %s\n",
2153 			   ret, slave_dev->name);
2154 		rtnl_unlock();
2155 		goto out_phy;
2156 	}
2157 
2158 	ret = netdev_upper_dev_link(master, slave_dev, NULL);
2159 
2160 	rtnl_unlock();
2161 
2162 	if (ret)
2163 		goto out_unregister;
2164 
2165 	return 0;
2166 
2167 out_unregister:
2168 	unregister_netdev(slave_dev);
2169 out_phy:
2170 	rtnl_lock();
2171 	phylink_disconnect_phy(p->dp->pl);
2172 	rtnl_unlock();
2173 	phylink_destroy(p->dp->pl);
2174 out_gcells:
2175 	gro_cells_destroy(&p->gcells);
2176 out_free:
2177 	free_percpu(slave_dev->tstats);
2178 	free_netdev(slave_dev);
2179 	port->slave = NULL;
2180 	return ret;
2181 }
2182 
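/* Tear down a user port's net device, undoing dsa_slave_create() */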
2183 void dsa_slave_destroy(struct net_device *slave_dev)
2184 {
2185 	struct net_device *master = dsa_slave_to_master(slave_dev);
2186 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2187 	struct dsa_slave_priv *p = netdev_priv(slave_dev);
2188 
2189 	netif_carrier_off(slave_dev);
2190 	rtnl_lock();
2191 	netdev_upper_dev_unlink(master, slave_dev);
2192 	unregister_netdevice(slave_dev);
2193 	phylink_disconnect_phy(dp->pl);
2194 	rtnl_unlock();
2195 
2196 	phylink_destroy(dp->pl);
2197 	gro_cells_destroy(&p->gcells);
2198 	free_percpu(slave_dev->tstats);
2199 	free_netdev(slave_dev);
2200 }
2201 
2202 bool dsa_slave_dev_check(const struct net_device *dev)
2203 {
2204 	return dev->netdev_ops == &dsa_slave_netdev_ops;
2205 }
2206 EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
2207 
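/* Handle a user port joining or leaving a bridge, LAG or HSR upper.
 * -EOPNOTSUPP from the driver means the upper cannot be offloaded; this
 * is not fatal, traffic is simply handled in software instead.
 */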
2208 static int dsa_slave_changeupper(struct net_device *dev,
2209 				 struct netdev_notifier_changeupper_info *info)
2210 {
2211 	struct dsa_port *dp = dsa_slave_to_port(dev);
2212 	struct netlink_ext_ack *extack;
2213 	int err = NOTIFY_DONE;
2214 
2215 	extack = netdev_notifier_info_to_extack(&info->info);
2216 
2217 	if (netif_is_bridge_master(info->upper_dev)) {
2218 		if (info->linking) {
2219 			err = dsa_port_bridge_join(dp, info->upper_dev, extack);
2220 			if (!err)
2221 				dsa_bridge_mtu_normalization(dp);
2222 			if (err == -EOPNOTSUPP) {
2223 				NL_SET_ERR_MSG_MOD(extack,
2224 						   "Offloading not supported");
2225 				err = 0;
2226 			}
2227 			err = notifier_from_errno(err);
2228 		} else {
2229 			dsa_port_bridge_leave(dp, info->upper_dev);
2230 			err = NOTIFY_OK;
2231 		}
2232 	} else if (netif_is_lag_master(info->upper_dev)) {
2233 		if (info->linking) {
2234 			err = dsa_port_lag_join(dp, info->upper_dev,
2235 						info->upper_info, extack);
2236 			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Offloading not supported");
2239 				err = 0;
2240 			}
2241 			err = notifier_from_errno(err);
2242 		} else {
2243 			dsa_port_lag_leave(dp, info->upper_dev);
2244 			err = NOTIFY_OK;
2245 		}
2246 	} else if (is_hsr_master(info->upper_dev)) {
2247 		if (info->linking) {
2248 			err = dsa_port_hsr_join(dp, info->upper_dev);
2249 			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Offloading not supported");
2252 				err = 0;
2253 			}
2254 			err = notifier_from_errno(err);
2255 		} else {
2256 			dsa_port_hsr_leave(dp, info->upper_dev);
2257 			err = NOTIFY_OK;
2258 		}
2259 	}
2260 
2261 	return err;
2262 }
2263 
2264 static int dsa_slave_prechangeupper(struct net_device *dev,
2265 				    struct netdev_notifier_changeupper_info *info)
2266 {
2267 	struct dsa_port *dp = dsa_slave_to_port(dev);
2268 
2269 	if (netif_is_bridge_master(info->upper_dev) && !info->linking)
2270 		dsa_port_pre_bridge_leave(dp, info->upper_dev);
2271 	else if (netif_is_lag_master(info->upper_dev) && !info->linking)
2272 		dsa_port_pre_lag_leave(dp, info->upper_dev);
	/* dsa_port_pre_hsr_leave is not needed, since an hsr device cannot
	 * yet be meaningfully enslaved to a bridge.
	 */
2276 
2277 	return NOTIFY_DONE;
2278 }
2279 
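/* A LAG of DSA user ports may itself gain an upper (e.g. a bridge).
 * Replay the changeupper event on each DSA lower that actually offloads
 * the LAG (i.e. has dp->lag set).
 */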
2280 static int
2281 dsa_slave_lag_changeupper(struct net_device *dev,
2282 			  struct netdev_notifier_changeupper_info *info)
2283 {
2284 	struct net_device *lower;
2285 	struct list_head *iter;
2286 	int err = NOTIFY_DONE;
2287 	struct dsa_port *dp;
2288 
2289 	netdev_for_each_lower_dev(dev, lower, iter) {
2290 		if (!dsa_slave_dev_check(lower))
2291 			continue;
2292 
2293 		dp = dsa_slave_to_port(lower);
2294 		if (!dp->lag)
2295 			/* Software LAG */
2296 			continue;
2297 
2298 		err = dsa_slave_changeupper(lower, info);
2299 		if (notifier_to_errno(err))
2300 			break;
2301 	}
2302 
2303 	return err;
2304 }
2305 
2306 /* Same as dsa_slave_lag_changeupper() except that it calls
2307  * dsa_slave_prechangeupper()
2308  */
2309 static int
2310 dsa_slave_lag_prechangeupper(struct net_device *dev,
2311 			     struct netdev_notifier_changeupper_info *info)
2312 {
2313 	struct net_device *lower;
2314 	struct list_head *iter;
2315 	int err = NOTIFY_DONE;
2316 	struct dsa_port *dp;
2317 
2318 	netdev_for_each_lower_dev(dev, lower, iter) {
2319 		if (!dsa_slave_dev_check(lower))
2320 			continue;
2321 
2322 		dp = dsa_slave_to_port(lower);
2323 		if (!dp->lag)
2324 			/* Software LAG */
2325 			continue;
2326 
2327 		err = dsa_slave_prechangeupper(lower, info);
2328 		if (notifier_to_errno(err))
2329 			break;
2330 	}
2331 
2332 	return err;
2333 }
2334 
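/* If a user port is a member of a VLAN-aware bridge, deny enslaving 8021q
 * uppers of that port into a bridge: their traffic would clash with the
 * VLANs managed by the bridge.
 */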
2335 static int
2336 dsa_prevent_bridging_8021q_upper(struct net_device *dev,
2337 				 struct netdev_notifier_changeupper_info *info)
2338 {
2339 	struct netlink_ext_ack *ext_ack;
2340 	struct net_device *slave, *br;
2341 	struct dsa_port *dp;
2342 
2343 	ext_ack = netdev_notifier_info_to_extack(&info->info);
2344 
2345 	if (!is_vlan_dev(dev))
2346 		return NOTIFY_DONE;
2347 
2348 	slave = vlan_dev_real_dev(dev);
2349 	if (!dsa_slave_dev_check(slave))
2350 		return NOTIFY_DONE;
2351 
2352 	dp = dsa_slave_to_port(slave);
2353 	br = dsa_port_bridge_dev_get(dp);
2354 	if (!br)
2355 		return NOTIFY_DONE;
2356 
2357 	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
2358 	if (br_vlan_enabled(br) &&
2359 	    netif_is_bridge_master(info->upper_dev) && info->linking) {
2360 		NL_SET_ERR_MSG_MOD(ext_ack,
2361 				   "Cannot enslave VLAN device into VLAN aware bridge");
2362 		return notifier_from_errno(-EINVAL);
2363 	}
2364 
2365 	return NOTIFY_DONE;
2366 }
2367 
2368 static int
2369 dsa_slave_check_8021q_upper(struct net_device *dev,
2370 			    struct netdev_notifier_changeupper_info *info)
2371 {
2372 	struct dsa_port *dp = dsa_slave_to_port(dev);
2373 	struct net_device *br = dsa_port_bridge_dev_get(dp);
2374 	struct bridge_vlan_info br_info;
2375 	struct netlink_ext_ack *extack;
2376 	int err = NOTIFY_DONE;
2377 	u16 vid;
2378 
2379 	if (!br || !br_vlan_enabled(br))
2380 		return NOTIFY_DONE;
2381 
2382 	extack = netdev_notifier_info_to_extack(&info->info);
2383 	vid = vlan_dev_vlan_id(info->upper_dev);
2384 
	/* br_vlan_get_info() returns -EINVAL if the device is not a bridge
	 * port and -ENOENT if the VID is not found. Returning 0 means the
	 * VLAN already exists on the bridge, which is a failure for us here.
	 */
2389 	err = br_vlan_get_info(br, vid, &br_info);
2390 	if (err == 0) {
2391 		NL_SET_ERR_MSG_MOD(extack,
2392 				   "This VLAN is already configured by the bridge");
2393 		return notifier_from_errno(-EBUSY);
2394 	}
2395 
2396 	return NOTIFY_DONE;
2397 }
2398 
2399 static int
2400 dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
2401 				      struct netdev_notifier_changeupper_info *info)
2402 {
2403 	struct dsa_switch *ds;
2404 	struct dsa_port *dp;
2405 	int err;
2406 
2407 	if (!dsa_slave_dev_check(dev))
2408 		return dsa_prevent_bridging_8021q_upper(dev, info);
2409 
2410 	dp = dsa_slave_to_port(dev);
2411 	ds = dp->ds;
2412 
2413 	if (ds->ops->port_prechangeupper) {
2414 		err = ds->ops->port_prechangeupper(ds, dp->index, info);
2415 		if (err)
2416 			return notifier_from_errno(err);
2417 	}
2418 
2419 	if (is_vlan_dev(info->upper_dev))
2420 		return dsa_slave_check_8021q_upper(dev, info);
2421 
2422 	return NOTIFY_DONE;
2423 }
2424 
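/* Netdevice notifier: validates and reacts to topology changes around
 * DSA interfaces (uppers being linked and unlinked, LAG lower state
 * changes) and tracks the admin and oper state of DSA master interfaces.
 */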
2425 static int dsa_slave_netdevice_event(struct notifier_block *nb,
2426 				     unsigned long event, void *ptr)
2427 {
2428 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2429 
2430 	switch (event) {
2431 	case NETDEV_PRECHANGEUPPER: {
2432 		struct netdev_notifier_changeupper_info *info = ptr;
2433 		int err;
2434 
2435 		err = dsa_slave_prechangeupper_sanity_check(dev, info);
2436 		if (err != NOTIFY_DONE)
2437 			return err;
2438 
2439 		if (dsa_slave_dev_check(dev))
2440 			return dsa_slave_prechangeupper(dev, ptr);
2441 
2442 		if (netif_is_lag_master(dev))
2443 			return dsa_slave_lag_prechangeupper(dev, ptr);
2444 
2445 		break;
2446 	}
2447 	case NETDEV_CHANGEUPPER:
2448 		if (dsa_slave_dev_check(dev))
2449 			return dsa_slave_changeupper(dev, ptr);
2450 
2451 		if (netif_is_lag_master(dev))
2452 			return dsa_slave_lag_changeupper(dev, ptr);
2453 
2454 		break;
2455 	case NETDEV_CHANGELOWERSTATE: {
2456 		struct netdev_notifier_changelowerstate_info *info = ptr;
2457 		struct dsa_port *dp;
2458 		int err;
2459 
2460 		if (!dsa_slave_dev_check(dev))
2461 			break;
2462 
2463 		dp = dsa_slave_to_port(dev);
2464 
2465 		err = dsa_port_lag_change(dp, info->lower_state_info);
2466 		return notifier_from_errno(err);
2467 	}
2468 	case NETDEV_CHANGE:
2469 	case NETDEV_UP: {
		/* Track the state of the master port.
		 * A DSA driver may require the master port (and indirectly
		 * the tagger) to be available for some special operations.
		 */
2474 		if (netdev_uses_dsa(dev)) {
2475 			struct dsa_port *cpu_dp = dev->dsa_ptr;
2476 			struct dsa_switch_tree *dst = cpu_dp->ds->dst;
2477 
2478 			/* Track when the master port is UP */
2479 			dsa_tree_master_oper_state_change(dst, dev,
2480 							  netif_oper_up(dev));
2481 
			/* Track when the master port is ready and can accept
			 * packets.
			 * A NETDEV_UP event is not enough to flag a port as
			 * ready. We also have to wait for linkwatch_do_dev to
			 * call dev_activate and emit a NETDEV_CHANGE event.
			 * We consider a master port ready when the dev has a
			 * qdisc assigned and it is not the noop qdisc.
			 */
2490 			dsa_tree_master_admin_state_change(dst, dev,
2491 							   !qdisc_tx_is_noop(dev));
2492 
2493 			return NOTIFY_OK;
2494 		}
2495 
2496 		return NOTIFY_DONE;
2497 	}
2498 	case NETDEV_GOING_DOWN: {
2499 		struct dsa_port *dp, *cpu_dp;
2500 		struct dsa_switch_tree *dst;
2501 		LIST_HEAD(close_list);
2502 
2503 		if (!netdev_uses_dsa(dev))
2504 			return NOTIFY_DONE;
2505 
2506 		cpu_dp = dev->dsa_ptr;
2507 		dst = cpu_dp->ds->dst;
2508 
2509 		dsa_tree_master_admin_state_change(dst, dev, false);
2510 
2511 		list_for_each_entry(dp, &dst->ports, list) {
2512 			if (!dsa_port_is_user(dp))
2513 				continue;
2514 
2515 			list_add(&dp->slave->close_list, &close_list);
2516 		}
2517 
2518 		dev_close_many(&close_list, true);
2519 
2520 		return NOTIFY_OK;
2521 	}
2522 	default:
2523 		break;
2524 	}
2525 
2526 	return NOTIFY_DONE;
2527 }
2528 
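/* Echo a SWITCHDEV_FDB_OFFLOADED notification back to the bridge so it
 * can mark the FDB entry as offloaded.
 */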
2529 static void
2530 dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
2531 {
2532 	struct switchdev_notifier_fdb_info info = {};
2533 
2534 	info.addr = switchdev_work->addr;
2535 	info.vid = switchdev_work->vid;
2536 	info.offloaded = true;
2537 	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
2538 				 switchdev_work->orig_dev, &info.info, NULL);
2539 }
2540 
2541 static void dsa_slave_switchdev_event_work(struct work_struct *work)
2542 {
2543 	struct dsa_switchdev_event_work *switchdev_work =
2544 		container_of(work, struct dsa_switchdev_event_work, work);
2545 	const unsigned char *addr = switchdev_work->addr;
2546 	struct net_device *dev = switchdev_work->dev;
2547 	u16 vid = switchdev_work->vid;
2548 	struct dsa_switch *ds;
2549 	struct dsa_port *dp;
2550 	int err;
2551 
2552 	dp = dsa_slave_to_port(dev);
2553 	ds = dp->ds;
2554 
2555 	switch (switchdev_work->event) {
2556 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2557 		if (switchdev_work->host_addr)
2558 			err = dsa_port_bridge_host_fdb_add(dp, addr, vid);
2559 		else if (dp->lag)
2560 			err = dsa_port_lag_fdb_add(dp, addr, vid);
2561 		else
2562 			err = dsa_port_fdb_add(dp, addr, vid);
2563 		if (err) {
2564 			dev_err(ds->dev,
2565 				"port %d failed to add %pM vid %d to fdb: %d\n",
2566 				dp->index, addr, vid, err);
2567 			break;
2568 		}
2569 		dsa_fdb_offload_notify(switchdev_work);
2570 		break;
2571 
2572 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2573 		if (switchdev_work->host_addr)
2574 			err = dsa_port_bridge_host_fdb_del(dp, addr, vid);
2575 		else if (dp->lag)
2576 			err = dsa_port_lag_fdb_del(dp, addr, vid);
2577 		else
2578 			err = dsa_port_fdb_del(dp, addr, vid);
2579 		if (err) {
2580 			dev_err(ds->dev,
2581 				"port %d failed to delete %pM vid %d from fdb: %d\n",
2582 				dp->index, addr, vid, err);
2583 		}
2584 
2585 		break;
2586 	}
2587 
2588 	kfree(switchdev_work);
2589 }
2590 
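/* An interface is foreign to us if it is not offloaded by this switch
 * tree: a bridge or bridge port we don't offload, or any other kind of
 * netdevice. Addresses learned on foreign interfaces bridged with us get
 * installed as host addresses, i.e. towards the CPU port.
 */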
2591 static bool dsa_foreign_dev_check(const struct net_device *dev,
2592 				  const struct net_device *foreign_dev)
2593 {
2594 	const struct dsa_port *dp = dsa_slave_to_port(dev);
2595 	struct dsa_switch_tree *dst = dp->ds->dst;
2596 
2597 	if (netif_is_bridge_master(foreign_dev))
2598 		return !dsa_tree_offloads_bridge_dev(dst, foreign_dev);
2599 
2600 	if (netif_is_bridge_port(foreign_dev))
2601 		return !dsa_tree_offloads_bridge_port(dst, foreign_dev);
2602 
2603 	/* Everything else is foreign */
2604 	return true;
2605 }
2606 
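/* Switchdev FDB notifiers run in atomic context, while programming the
 * switch usually implies sleeping (e.g. for MDIO access), so defer the
 * actual FDB operation to a work item.
 */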
2607 static int dsa_slave_fdb_event(struct net_device *dev,
2608 			       struct net_device *orig_dev,
2609 			       unsigned long event, const void *ctx,
2610 			       const struct switchdev_notifier_fdb_info *fdb_info)
2611 {
2612 	struct dsa_switchdev_event_work *switchdev_work;
2613 	struct dsa_port *dp = dsa_slave_to_port(dev);
2614 	bool host_addr = fdb_info->is_local;
2615 	struct dsa_switch *ds = dp->ds;
2616 
2617 	if (ctx && ctx != dp)
2618 		return 0;
2619 
2620 	if (switchdev_fdb_is_dynamically_learned(fdb_info)) {
2621 		if (dsa_port_offloads_bridge_port(dp, orig_dev))
2622 			return 0;
2623 
2624 		/* FDB entries learned by the software bridge or by foreign
2625 		 * bridge ports should be installed as host addresses only if
2626 		 * the driver requests assisted learning.
2627 		 */
2628 		if (!ds->assisted_learning_on_cpu_port)
2629 			return 0;
2630 	}
2631 
2632 	/* Also treat FDB entries on foreign interfaces bridged with us as host
2633 	 * addresses.
2634 	 */
2635 	if (dsa_foreign_dev_check(dev, orig_dev))
2636 		host_addr = true;
2637 
2638 	/* Check early that we're not doing work in vain.
2639 	 * Host addresses on LAG ports still require regular FDB ops,
2640 	 * since the CPU port isn't in a LAG.
2641 	 */
2642 	if (dp->lag && !host_addr) {
2643 		if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del)
2644 			return -EOPNOTSUPP;
2645 	} else {
2646 		if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
2647 			return -EOPNOTSUPP;
2648 	}
2649 
2650 	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
2651 	if (!switchdev_work)
2652 		return -ENOMEM;
2653 
2654 	netdev_dbg(dev, "%s FDB entry towards %s, addr %pM vid %d%s\n",
2655 		   event == SWITCHDEV_FDB_ADD_TO_DEVICE ? "Adding" : "Deleting",
2656 		   orig_dev->name, fdb_info->addr, fdb_info->vid,
2657 		   host_addr ? " as host address" : "");
2658 
2659 	INIT_WORK(&switchdev_work->work, dsa_slave_switchdev_event_work);
2660 	switchdev_work->event = event;
2661 	switchdev_work->dev = dev;
2662 	switchdev_work->orig_dev = orig_dev;
2663 
2664 	ether_addr_copy(switchdev_work->addr, fdb_info->addr);
2665 	switchdev_work->vid = fdb_info->vid;
2666 	switchdev_work->host_addr = host_addr;
2667 
2668 	dsa_schedule_work(&switchdev_work->work);
2669 
2670 	return 0;
2671 }
2672 
2673 /* Called under rcu_read_lock() */
2674 static int dsa_slave_switchdev_event(struct notifier_block *unused,
2675 				     unsigned long event, void *ptr)
2676 {
2677 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2678 	int err;
2679 
2680 	switch (event) {
2681 	case SWITCHDEV_PORT_ATTR_SET:
2682 		err = switchdev_handle_port_attr_set(dev, ptr,
2683 						     dsa_slave_dev_check,
2684 						     dsa_slave_port_attr_set);
2685 		return notifier_from_errno(err);
2686 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2687 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2688 		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
2689 							   dsa_slave_dev_check,
2690 							   dsa_foreign_dev_check,
2691 							   dsa_slave_fdb_event);
2692 		return notifier_from_errno(err);
2693 	default:
2694 		return NOTIFY_DONE;
2695 	}
2698 }
2699 
2700 static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
2701 					      unsigned long event, void *ptr)
2702 {
2703 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2704 	int err;
2705 
2706 	switch (event) {
2707 	case SWITCHDEV_PORT_OBJ_ADD:
2708 		err = switchdev_handle_port_obj_add_foreign(dev, ptr,
2709 							    dsa_slave_dev_check,
2710 							    dsa_foreign_dev_check,
2711 							    dsa_slave_port_obj_add);
2712 		return notifier_from_errno(err);
2713 	case SWITCHDEV_PORT_OBJ_DEL:
2714 		err = switchdev_handle_port_obj_del_foreign(dev, ptr,
2715 							    dsa_slave_dev_check,
2716 							    dsa_foreign_dev_check,
2717 							    dsa_slave_port_obj_del);
2718 		return notifier_from_errno(err);
2719 	case SWITCHDEV_PORT_ATTR_SET:
2720 		err = switchdev_handle_port_attr_set(dev, ptr,
2721 						     dsa_slave_dev_check,
2722 						     dsa_slave_port_attr_set);
2723 		return notifier_from_errno(err);
2724 	}
2725 
2726 	return NOTIFY_DONE;
2727 }
2728 
2729 static struct notifier_block dsa_slave_nb __read_mostly = {
2730 	.notifier_call  = dsa_slave_netdevice_event,
2731 };
2732 
2733 struct notifier_block dsa_slave_switchdev_notifier = {
2734 	.notifier_call = dsa_slave_switchdev_event,
2735 };
2736 
2737 struct notifier_block dsa_slave_switchdev_blocking_notifier = {
2738 	.notifier_call = dsa_slave_switchdev_blocking_event,
2739 };
2740 
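/* Register the netdevice and switchdev notifiers that drive the user
 * port event handling above; unwind in reverse order on failure.
 */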
2741 int dsa_slave_register_notifier(void)
2742 {
2743 	struct notifier_block *nb;
2744 	int err;
2745 
2746 	err = register_netdevice_notifier(&dsa_slave_nb);
2747 	if (err)
2748 		return err;
2749 
2750 	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
2751 	if (err)
2752 		goto err_switchdev_nb;
2753 
2754 	nb = &dsa_slave_switchdev_blocking_notifier;
2755 	err = register_switchdev_blocking_notifier(nb);
2756 	if (err)
2757 		goto err_switchdev_blocking_nb;
2758 
2759 	return 0;
2760 
2761 err_switchdev_blocking_nb:
2762 	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
2763 err_switchdev_nb:
2764 	unregister_netdevice_notifier(&dsa_slave_nb);
2765 	return err;
2766 }
2767 
2768 void dsa_slave_unregister_notifier(void)
2769 {
2770 	struct notifier_block *nb;
2771 	int err;
2772 
2773 	nb = &dsa_slave_switchdev_blocking_notifier;
2774 	err = unregister_switchdev_blocking_notifier(nb);
2775 	if (err)
2776 		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
2777 
2778 	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
2779 	if (err)
2780 		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
2781 
2782 	err = unregister_netdevice_notifier(&dsa_slave_nb);
2783 	if (err)
2784 		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
2785 }
2786