1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/dsa/slave.c - Slave device handling
4  * Copyright (c) 2008-2009 Marvell Semiconductor
5  */
6 
7 #include <linux/list.h>
8 #include <linux/etherdevice.h>
9 #include <linux/netdevice.h>
10 #include <linux/phy.h>
11 #include <linux/phy_fixed.h>
12 #include <linux/phylink.h>
13 #include <linux/of_net.h>
14 #include <linux/of_mdio.h>
15 #include <linux/mdio.h>
16 #include <net/rtnetlink.h>
17 #include <net/pkt_cls.h>
18 #include <net/selftests.h>
19 #include <net/tc_act/tc_mirred.h>
20 #include <linux/if_bridge.h>
21 #include <linux/if_hsr.h>
22 #include <net/dcbnl.h>
23 #include <linux/netpoll.h>
24 
25 #include "dsa_priv.h"
26 
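/* Host unicast/multicast addresses are synced to the switch from atomic
 * context (the netdev address lists are updated under addr_list_lock), while
 * programming the hardware may sleep.  The events below are therefore
 * deferred to the DSA ordered workqueue through
 * dsa_slave_schedule_standalone_work(), which allocates with GFP_ATOMIC.
 */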
27 static void dsa_slave_standalone_event_work(struct work_struct *work)
28 {
29 	struct dsa_standalone_event_work *standalone_work =
30 		container_of(work, struct dsa_standalone_event_work, work);
31 	const unsigned char *addr = standalone_work->addr;
32 	struct net_device *dev = standalone_work->dev;
33 	struct dsa_port *dp = dsa_slave_to_port(dev);
34 	struct switchdev_obj_port_mdb mdb;
35 	struct dsa_switch *ds = dp->ds;
36 	u16 vid = standalone_work->vid;
37 	int err;
38 
39 	switch (standalone_work->event) {
40 	case DSA_UC_ADD:
41 		err = dsa_port_standalone_host_fdb_add(dp, addr, vid);
42 		if (err) {
43 			dev_err(ds->dev,
44 				"port %d failed to add %pM vid %d to fdb: %d\n",
45 				dp->index, addr, vid, err);
46 			break;
47 		}
48 		break;
49 
50 	case DSA_UC_DEL:
51 		err = dsa_port_standalone_host_fdb_del(dp, addr, vid);
52 		if (err) {
53 			dev_err(ds->dev,
54 				"port %d failed to delete %pM vid %d from fdb: %d\n",
55 				dp->index, addr, vid, err);
56 		}
57 
58 		break;
59 	case DSA_MC_ADD:
60 		ether_addr_copy(mdb.addr, addr);
61 		mdb.vid = vid;
62 
63 		err = dsa_port_standalone_host_mdb_add(dp, &mdb);
64 		if (err) {
65 			dev_err(ds->dev,
66 				"port %d failed to add %pM vid %d to mdb: %d\n",
67 				dp->index, addr, vid, err);
68 			break;
69 		}
70 		break;
71 	case DSA_MC_DEL:
72 		ether_addr_copy(mdb.addr, addr);
73 		mdb.vid = vid;
74 
75 		err = dsa_port_standalone_host_mdb_del(dp, &mdb);
76 		if (err) {
77 			dev_err(ds->dev,
78 				"port %d failed to delete %pM vid %d from mdb: %d\n",
79 				dp->index, addr, vid, err);
80 		}
81 
82 		break;
83 	}
84 
85 	kfree(standalone_work);
86 }
87 
88 static int dsa_slave_schedule_standalone_work(struct net_device *dev,
89 					      enum dsa_standalone_event event,
90 					      const unsigned char *addr,
91 					      u16 vid)
92 {
93 	struct dsa_standalone_event_work *standalone_work;
94 
95 	standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC);
96 	if (!standalone_work)
97 		return -ENOMEM;
98 
99 	INIT_WORK(&standalone_work->work, dsa_slave_standalone_event_work);
100 	standalone_work->event = event;
101 	standalone_work->dev = dev;
102 
103 	ether_addr_copy(standalone_work->addr, addr);
104 	standalone_work->vid = vid;
105 
106 	dsa_schedule_work(&standalone_work->work);
107 
108 	return 0;
109 }
110 
111 static int dsa_slave_sync_uc(struct net_device *dev,
112 			     const unsigned char *addr)
113 {
114 	return dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
115 }
116 
117 static int dsa_slave_unsync_uc(struct net_device *dev,
118 			       const unsigned char *addr)
119 {
120 	return dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
121 }
122 
123 static int dsa_slave_sync_mc(struct net_device *dev,
124 			     const unsigned char *addr)
125 {
126 	return dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
127 }
128 
129 static int dsa_slave_unsync_mc(struct net_device *dev,
130 			       const unsigned char *addr)
131 {
132 	return dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
133 }
134 
135 /* slave mii_bus handling ***************************************************/
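/* The slave MII bus proxies accesses to the switch's internal PHYs (those
 * set in ds->phys_mii_mask) through the driver's phy_read/phy_write ops.
 * Reads from any other address return 0xffff, as an idle MDIO bus would.
 */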
136 static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
137 {
138 	struct dsa_switch *ds = bus->priv;
139 
140 	if (ds->phys_mii_mask & (1 << addr))
141 		return ds->ops->phy_read(ds, addr, reg);
142 
143 	return 0xffff;
144 }
145 
146 static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
147 {
148 	struct dsa_switch *ds = bus->priv;
149 
150 	if (ds->phys_mii_mask & (1 << addr))
151 		return ds->ops->phy_write(ds, addr, reg, val);
152 
153 	return 0;
154 }
155 
156 void dsa_slave_mii_bus_init(struct dsa_switch *ds)
157 {
158 	ds->slave_mii_bus->priv = (void *)ds;
159 	ds->slave_mii_bus->name = "dsa slave smi";
160 	ds->slave_mii_bus->read = dsa_slave_phy_read;
161 	ds->slave_mii_bus->write = dsa_slave_phy_write;
162 	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
163 		 ds->dst->index, ds->index);
164 	ds->slave_mii_bus->parent = ds->dev;
165 	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
166 }
167 
168 
169 /* slave device handling ****************************************************/
170 static int dsa_slave_get_iflink(const struct net_device *dev)
171 {
172 	return dsa_slave_to_master(dev)->ifindex;
173 }
174 
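/* Opening a user port: bring up the DSA master first (the conduit for all
 * tagged traffic), install the port's MAC address - as a standalone host FDB
 * entry if the switch can filter unicast towards the CPU, and as a secondary
 * unicast address on the master if it differs from the master's own - then
 * enable the port itself.  The error path unwinds in reverse order.
 */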
175 static int dsa_slave_open(struct net_device *dev)
176 {
177 	struct net_device *master = dsa_slave_to_master(dev);
178 	struct dsa_port *dp = dsa_slave_to_port(dev);
179 	struct dsa_switch *ds = dp->ds;
180 	int err;
181 
182 	err = dev_open(master, NULL);
183 	if (err < 0) {
184 		netdev_err(dev, "failed to open master %s\n", master->name);
185 		goto out;
186 	}
187 
188 	if (dsa_switch_supports_uc_filtering(ds)) {
189 		err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0);
190 		if (err)
191 			goto out;
192 	}
193 
194 	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
195 		err = dev_uc_add(master, dev->dev_addr);
196 		if (err < 0)
197 			goto del_host_addr;
198 	}
199 
200 	err = dsa_port_enable_rt(dp, dev->phydev);
201 	if (err)
202 		goto del_unicast;
203 
204 	return 0;
205 
206 del_unicast:
207 	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
208 		dev_uc_del(master, dev->dev_addr);
209 del_host_addr:
210 	if (dsa_switch_supports_uc_filtering(ds))
211 		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
212 out:
213 	return err;
214 }
215 
216 static int dsa_slave_close(struct net_device *dev)
217 {
218 	struct net_device *master = dsa_slave_to_master(dev);
219 	struct dsa_port *dp = dsa_slave_to_port(dev);
220 	struct dsa_switch *ds = dp->ds;
221 
222 	dsa_port_disable_rt(dp);
223 
224 	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
225 		dev_uc_del(master, dev->dev_addr);
226 
227 	if (dsa_switch_supports_uc_filtering(ds))
228 		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
229 
230 	return 0;
231 }
232 
233 /* Keep flooding enabled towards this port's CPU port as long as it serves at
234  * least one port in the tree that requires it.
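 *
 * A user port requires multicast flooding towards the CPU port while it has
 * IFF_ALLMULTI set, and unicast flooding while it has IFF_PROMISC set.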
235  */
236 static void dsa_port_manage_cpu_flood(struct dsa_port *dp)
237 {
238 	struct switchdev_brport_flags flags = {
239 		.mask = BR_FLOOD | BR_MCAST_FLOOD,
240 	};
241 	struct dsa_switch_tree *dst = dp->ds->dst;
242 	struct dsa_port *cpu_dp = dp->cpu_dp;
243 	struct dsa_port *other_dp;
244 	int err;
245 
246 	list_for_each_entry(other_dp, &dst->ports, list) {
247 		if (!dsa_port_is_user(other_dp))
248 			continue;
249 
250 		if (other_dp->cpu_dp != cpu_dp)
251 			continue;
252 
253 		if (other_dp->slave->flags & IFF_ALLMULTI)
254 			flags.val |= BR_MCAST_FLOOD;
255 		if (other_dp->slave->flags & IFF_PROMISC)
256 			flags.val |= BR_FLOOD;
257 	}
258 
259 	err = dsa_port_pre_bridge_flags(dp, flags, NULL);
260 	if (err)
261 		return;
262 
263 	dsa_port_bridge_flags(cpu_dp, flags, NULL);
264 }
265 
266 static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
267 {
268 	struct net_device *master = dsa_slave_to_master(dev);
269 	struct dsa_port *dp = dsa_slave_to_port(dev);
270 	struct dsa_switch *ds = dp->ds;
271 
272 	if (change & IFF_ALLMULTI)
273 		dev_set_allmulti(master,
274 				 dev->flags & IFF_ALLMULTI ? 1 : -1);
275 	if (change & IFF_PROMISC)
276 		dev_set_promiscuity(master,
277 				    dev->flags & IFF_PROMISC ? 1 : -1);
278 
279 	if (dsa_switch_supports_uc_filtering(ds) &&
280 	    dsa_switch_supports_mc_filtering(ds))
281 		dsa_port_manage_cpu_flood(dp);
282 }
283 
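/* ndo_set_rx_mode runs in atomic context, so the __dev_uc_sync/__dev_mc_sync
 * callbacks only schedule deferred work to program the switch.
 */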
284 static void dsa_slave_set_rx_mode(struct net_device *dev)
285 {
286 	struct net_device *master = dsa_slave_to_master(dev);
287 	struct dsa_port *dp = dsa_slave_to_port(dev);
288 	struct dsa_switch *ds = dp->ds;
289 
290 	dev_mc_sync(master, dev);
291 	dev_uc_sync(master, dev);
292 	if (dsa_switch_supports_mc_filtering(ds))
293 		__dev_mc_sync(dev, dsa_slave_sync_mc, dsa_slave_unsync_mc);
294 	if (dsa_switch_supports_uc_filtering(ds))
295 		__dev_uc_sync(dev, dsa_slave_sync_uc, dsa_slave_unsync_uc);
296 }
297 
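/* Changing the MAC address is make-before-break: the new address is
 * installed in hardware and on the master before the old one is removed, so
 * the port never transiently loses its host address.
 */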
298 static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
299 {
300 	struct net_device *master = dsa_slave_to_master(dev);
301 	struct dsa_port *dp = dsa_slave_to_port(dev);
302 	struct dsa_switch *ds = dp->ds;
303 	struct sockaddr *addr = a;
304 	int err;
305 
306 	if (!is_valid_ether_addr(addr->sa_data))
307 		return -EADDRNOTAVAIL;
308 
309 	/* If the port is down, the address isn't synced yet to hardware or
310 	 * to the DSA master, so there is nothing to change.
311 	 */
312 	if (!(dev->flags & IFF_UP))
313 		goto out_change_dev_addr;
314 
315 	if (dsa_switch_supports_uc_filtering(ds)) {
316 		err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
317 		if (err)
318 			return err;
319 	}
320 
321 	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
322 		err = dev_uc_add(master, addr->sa_data);
323 		if (err < 0)
324 			goto del_unicast;
325 	}
326 
327 	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
328 		dev_uc_del(master, dev->dev_addr);
329 
330 	if (dsa_switch_supports_uc_filtering(ds))
331 		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
332 
333 out_change_dev_addr:
334 	eth_hw_addr_set(dev, addr->sa_data);
335 
336 	return 0;
337 
338 del_unicast:
339 	if (dsa_switch_supports_uc_filtering(ds))
340 		dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0);
341 
342 	return err;
343 }
344 
345 struct dsa_slave_dump_ctx {
346 	struct net_device *dev;
347 	struct sk_buff *skb;
348 	struct netlink_callback *cb;
349 	int idx;
350 };
351 
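/* Callback invoked by the driver's ->port_fdb_dump for each FDB entry; it
 * encodes the entry as an RTM_NEWNEIGH netlink message, which is what
 * userspace sees through e.g. "bridge fdb show dev swp0" (interface name
 * hypothetical).
 */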
352 static int
353 dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
354 			   bool is_static, void *data)
355 {
356 	struct dsa_slave_dump_ctx *dump = data;
357 	u32 portid = NETLINK_CB(dump->cb->skb).portid;
358 	u32 seq = dump->cb->nlh->nlmsg_seq;
359 	struct nlmsghdr *nlh;
360 	struct ndmsg *ndm;
361 
362 	if (dump->idx < dump->cb->args[2])
363 		goto skip;
364 
365 	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
366 			sizeof(*ndm), NLM_F_MULTI);
367 	if (!nlh)
368 		return -EMSGSIZE;
369 
370 	ndm = nlmsg_data(nlh);
371 	ndm->ndm_family  = AF_BRIDGE;
372 	ndm->ndm_pad1    = 0;
373 	ndm->ndm_pad2    = 0;
374 	ndm->ndm_flags   = NTF_SELF;
375 	ndm->ndm_type    = 0;
376 	ndm->ndm_ifindex = dump->dev->ifindex;
377 	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;
378 
379 	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
380 		goto nla_put_failure;
381 
382 	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
383 		goto nla_put_failure;
384 
385 	nlmsg_end(dump->skb, nlh);
386 
387 skip:
388 	dump->idx++;
389 	return 0;
390 
391 nla_put_failure:
392 	nlmsg_cancel(dump->skb, nlh);
393 	return -EMSGSIZE;
394 }
395 
396 static int
397 dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
398 		   struct net_device *dev, struct net_device *filter_dev,
399 		   int *idx)
400 {
401 	struct dsa_port *dp = dsa_slave_to_port(dev);
402 	struct dsa_slave_dump_ctx dump = {
403 		.dev = dev,
404 		.skb = skb,
405 		.cb = cb,
406 		.idx = *idx,
407 	};
408 	int err;
409 
410 	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
411 	*idx = dump.idx;
412 
413 	return err;
414 }
415 
416 static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
417 {
418 	struct dsa_slave_priv *p = netdev_priv(dev);
419 	struct dsa_switch *ds = p->dp->ds;
420 	int port = p->dp->index;
421 
422 	/* Pass through to switch driver if it supports timestamping */
423 	switch (cmd) {
424 	case SIOCGHWTSTAMP:
425 		if (ds->ops->port_hwtstamp_get)
426 			return ds->ops->port_hwtstamp_get(ds, port, ifr);
427 		break;
428 	case SIOCSHWTSTAMP:
429 		if (ds->ops->port_hwtstamp_set)
430 			return ds->ops->port_hwtstamp_set(ds, port, ifr);
431 		break;
432 	}
433 
434 	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
435 }
436 
437 static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
438 				   const struct switchdev_attr *attr,
439 				   struct netlink_ext_ack *extack)
440 {
441 	struct dsa_port *dp = dsa_slave_to_port(dev);
442 	int ret;
443 
444 	if (ctx && ctx != dp)
445 		return 0;
446 
447 	switch (attr->id) {
448 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
449 		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
450 			return -EOPNOTSUPP;
451 
452 		ret = dsa_port_set_state(dp, attr->u.stp_state, true);
453 		break;
454 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
455 		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
456 			return -EOPNOTSUPP;
457 
458 		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
459 					      extack);
460 		break;
461 	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
462 		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
463 			return -EOPNOTSUPP;
464 
465 		ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
466 		break;
467 	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
468 		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
469 			return -EOPNOTSUPP;
470 
471 		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
472 						extack);
473 		break;
474 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
475 		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
476 			return -EOPNOTSUPP;
477 
478 		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
479 		break;
480 	default:
481 		ret = -EOPNOTSUPP;
482 		break;
483 	}
484 
485 	return ret;
486 }
487 
488 /* Must be called under rcu_read_lock() */
489 static int
490 dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
491 				      const struct switchdev_obj_port_vlan *vlan)
492 {
493 	struct net_device *upper_dev;
494 	struct list_head *iter;
495 
496 	netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
497 		u16 vid;
498 
499 		if (!is_vlan_dev(upper_dev))
500 			continue;
501 
502 		vid = vlan_dev_vlan_id(upper_dev);
503 		if (vid == vlan->vid)
504 			return -EBUSY;
505 	}
506 
507 	return 0;
508 }
509 
510 static int dsa_slave_vlan_add(struct net_device *dev,
511 			      const struct switchdev_obj *obj,
512 			      struct netlink_ext_ack *extack)
513 {
514 	struct dsa_port *dp = dsa_slave_to_port(dev);
515 	struct switchdev_obj_port_vlan *vlan;
516 	int err;
517 
518 	if (dsa_port_skip_vlan_configuration(dp)) {
519 		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
520 		return 0;
521 	}
522 
523 	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
524 
525 	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
526 	 * the same VID.
527 	 */
528 	if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
529 		rcu_read_lock();
530 		err = dsa_slave_vlan_check_for_8021q_uppers(dev, vlan);
531 		rcu_read_unlock();
532 		if (err) {
533 			NL_SET_ERR_MSG_MOD(extack,
534 					   "Port already has a VLAN upper with this VID");
535 			return err;
536 		}
537 	}
538 
539 	return dsa_port_vlan_add(dp, vlan, extack);
540 }
541 
542 /* Offload a VLAN installed on the bridge or on a foreign interface by
543  * installing it as a VLAN towards the CPU port.
544  */
545 static int dsa_slave_host_vlan_add(struct net_device *dev,
546 				   const struct switchdev_obj *obj,
547 				   struct netlink_ext_ack *extack)
548 {
549 	struct dsa_port *dp = dsa_slave_to_port(dev);
550 	struct switchdev_obj_port_vlan vlan;
551 
552 	/* Do nothing if this is a software bridge */
553 	if (!dp->bridge)
554 		return -EOPNOTSUPP;
555 
556 	if (dsa_port_skip_vlan_configuration(dp)) {
557 		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
558 		return 0;
559 	}
560 
561 	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);
562 
563 	/* Even though drivers often handle CPU membership in special ways,
564 	 * it doesn't make sense to program a PVID, so clear this flag.
565 	 */
566 	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;
567 
568 	return dsa_port_host_vlan_add(dp, &vlan, extack);
569 }
570 
571 static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
572 				  const struct switchdev_obj *obj,
573 				  struct netlink_ext_ack *extack)
574 {
575 	struct dsa_port *dp = dsa_slave_to_port(dev);
576 	int err;
577 
578 	if (ctx && ctx != dp)
579 		return 0;
580 
581 	switch (obj->id) {
582 	case SWITCHDEV_OBJ_ID_PORT_MDB:
583 		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
584 			return -EOPNOTSUPP;
585 
586 		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
587 		break;
588 	case SWITCHDEV_OBJ_ID_HOST_MDB:
589 		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
590 			return -EOPNOTSUPP;
591 
592 		err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
593 		break;
594 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
595 		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
596 			err = dsa_slave_vlan_add(dev, obj, extack);
597 		else
598 			err = dsa_slave_host_vlan_add(dev, obj, extack);
599 		break;
600 	case SWITCHDEV_OBJ_ID_MRP:
601 		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
602 			return -EOPNOTSUPP;
603 
604 		err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
605 		break;
606 	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
607 		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
608 			return -EOPNOTSUPP;
609 
610 		err = dsa_port_mrp_add_ring_role(dp,
611 						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
612 		break;
613 	default:
614 		err = -EOPNOTSUPP;
615 		break;
616 	}
617 
618 	return err;
619 }
620 
621 static int dsa_slave_vlan_del(struct net_device *dev,
622 			      const struct switchdev_obj *obj)
623 {
624 	struct dsa_port *dp = dsa_slave_to_port(dev);
625 	struct switchdev_obj_port_vlan *vlan;
626 
627 	if (dsa_port_skip_vlan_configuration(dp))
628 		return 0;
629 
630 	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
631 
632 	return dsa_port_vlan_del(dp, vlan);
633 }
634 
635 static int dsa_slave_host_vlan_del(struct net_device *dev,
636 				   const struct switchdev_obj *obj)
637 {
638 	struct dsa_port *dp = dsa_slave_to_port(dev);
639 	struct switchdev_obj_port_vlan *vlan;
640 
641 	/* Do nothing if this is a software bridge */
642 	if (!dp->bridge)
643 		return -EOPNOTSUPP;
644 
645 	if (dsa_port_skip_vlan_configuration(dp))
646 		return 0;
647 
648 	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
649 
650 	return dsa_port_host_vlan_del(dp, vlan);
651 }
652 
653 static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
654 				  const struct switchdev_obj *obj)
655 {
656 	struct dsa_port *dp = dsa_slave_to_port(dev);
657 	int err;
658 
659 	if (ctx && ctx != dp)
660 		return 0;
661 
662 	switch (obj->id) {
663 	case SWITCHDEV_OBJ_ID_PORT_MDB:
664 		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
665 			return -EOPNOTSUPP;
666 
667 		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
668 		break;
669 	case SWITCHDEV_OBJ_ID_HOST_MDB:
670 		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
671 			return -EOPNOTSUPP;
672 
673 		err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
674 		break;
675 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
676 		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
677 			err = dsa_slave_vlan_del(dev, obj);
678 		else
679 			err = dsa_slave_host_vlan_del(dev, obj);
680 		break;
681 	case SWITCHDEV_OBJ_ID_MRP:
682 		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
683 			return -EOPNOTSUPP;
684 
685 		err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
686 		break;
687 	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
688 		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
689 			return -EOPNOTSUPP;
690 
691 		err = dsa_port_mrp_del_ring_role(dp,
692 						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
693 		break;
694 	default:
695 		err = -EOPNOTSUPP;
696 		break;
697 	}
698 
699 	return err;
700 }
701 
702 static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
703 						     struct sk_buff *skb)
704 {
705 #ifdef CONFIG_NET_POLL_CONTROLLER
706 	struct dsa_slave_priv *p = netdev_priv(dev);
707 
708 	return netpoll_send_skb(p->netpoll, skb);
709 #else
710 	BUG();
711 	return NETDEV_TX_OK;
712 #endif
713 }
714 
715 static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
716 				 struct sk_buff *skb)
717 {
718 	struct dsa_switch *ds = p->dp->ds;
719 
720 	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
721 		return;
722 
723 	if (!ds->ops->port_txtstamp)
724 		return;
725 
726 	ds->ops->port_txtstamp(ds, p->dp->index, skb);
727 }
728 
729 netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
730 {
731 	/* SKBs for netpoll still need to be mangled with the protocol-specific
732 	 * tag to be successfully transmitted
733 	 */
734 	if (unlikely(netpoll_tx_running(dev)))
735 		return dsa_slave_netpoll_send_skb(dev, skb);
736 
737 	/* Queue the SKB for transmission on the parent interface, but
738 	 * do not modify its EtherType
739 	 */
740 	skb->dev = dsa_slave_to_master(dev);
741 	dev_queue_xmit(skb);
742 
743 	return NETDEV_TX_OK;
744 }
745 EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
746 
747 static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
748 {
749 	int needed_headroom = dev->needed_headroom;
750 	int needed_tailroom = dev->needed_tailroom;
751 
752 	/* For tail taggers, we need to pad short frames ourselves, to ensure
753 	 * that the tail tag does not fail in its role of being at the end of
754 	 * the packet, once the master interface pads the frame. Account for
755 	 * that pad length here, and pad later.
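	 *
	 * Worked example (ETH_ZLEN is 60): for a 46-byte frame on a
	 * tail-tagging switch, reserve 60 - 46 = 14 extra bytes of tailroom
	 * here, so that eth_skb_pad() in dsa_slave_xmit() can pad the frame
	 * before the tagger appends the tail tag.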
756 	 */
757 	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
758 		needed_tailroom += ETH_ZLEN - skb->len;
759 	/* skb_headroom() returns unsigned int... */
760 	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
761 	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);
762 
763 	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
764 		/* No reallocation needed, yay! */
765 		return 0;
766 
767 	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
768 				GFP_ATOMIC);
769 }
770 
771 static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
772 {
773 	struct dsa_slave_priv *p = netdev_priv(dev);
774 	struct sk_buff *nskb;
775 
776 	dev_sw_netstats_tx_add(dev, 1, skb->len);
777 
778 	memset(skb->cb, 0, sizeof(skb->cb));
779 
780 	/* Handle tx timestamp if any */
781 	dsa_skb_tx_timestamp(p, skb);
782 
783 	if (dsa_realloc_skb(skb, dev)) {
784 		dev_kfree_skb_any(skb);
785 		return NETDEV_TX_OK;
786 	}
787 
788 	/* needed_tailroom should still be 'warm' in the cache line from
789 	 * dsa_realloc_skb(), which has also ensured that padding is safe.
790 	 */
791 	if (dev->needed_tailroom)
792 		eth_skb_pad(skb);
793 
794 	/* Transmit function may have to reallocate the original SKB,
795 	 * in which case it must have freed it. Only free it here on error.
796 	 */
797 	nskb = p->xmit(skb, dev);
798 	if (!nskb) {
799 		kfree_skb(skb);
800 		return NETDEV_TX_OK;
801 	}
802 
803 	return dsa_enqueue_skb(nskb, dev);
804 }
805 
806 /* ethtool operations *******************************************************/
807 
808 static void dsa_slave_get_drvinfo(struct net_device *dev,
809 				  struct ethtool_drvinfo *drvinfo)
810 {
811 	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
812 	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
813 	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
814 }
815 
816 static int dsa_slave_get_regs_len(struct net_device *dev)
817 {
818 	struct dsa_port *dp = dsa_slave_to_port(dev);
819 	struct dsa_switch *ds = dp->ds;
820 
821 	if (ds->ops->get_regs_len)
822 		return ds->ops->get_regs_len(ds, dp->index);
823 
824 	return -EOPNOTSUPP;
825 }
826 
827 static void
828 dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
829 {
830 	struct dsa_port *dp = dsa_slave_to_port(dev);
831 	struct dsa_switch *ds = dp->ds;
832 
833 	if (ds->ops->get_regs)
834 		ds->ops->get_regs(ds, dp->index, regs, _p);
835 }
836 
837 static int dsa_slave_nway_reset(struct net_device *dev)
838 {
839 	struct dsa_port *dp = dsa_slave_to_port(dev);
840 
841 	return phylink_ethtool_nway_reset(dp->pl);
842 }
843 
844 static int dsa_slave_get_eeprom_len(struct net_device *dev)
845 {
846 	struct dsa_port *dp = dsa_slave_to_port(dev);
847 	struct dsa_switch *ds = dp->ds;
848 
849 	if (ds->cd && ds->cd->eeprom_len)
850 		return ds->cd->eeprom_len;
851 
852 	if (ds->ops->get_eeprom_len)
853 		return ds->ops->get_eeprom_len(ds);
854 
855 	return 0;
856 }
857 
858 static int dsa_slave_get_eeprom(struct net_device *dev,
859 				struct ethtool_eeprom *eeprom, u8 *data)
860 {
861 	struct dsa_port *dp = dsa_slave_to_port(dev);
862 	struct dsa_switch *ds = dp->ds;
863 
864 	if (ds->ops->get_eeprom)
865 		return ds->ops->get_eeprom(ds, eeprom, data);
866 
867 	return -EOPNOTSUPP;
868 }
869 
870 static int dsa_slave_set_eeprom(struct net_device *dev,
871 				struct ethtool_eeprom *eeprom, u8 *data)
872 {
873 	struct dsa_port *dp = dsa_slave_to_port(dev);
874 	struct dsa_switch *ds = dp->ds;
875 
876 	if (ds->ops->set_eeprom)
877 		return ds->ops->set_eeprom(ds, eeprom, data);
878 
879 	return -EOPNOTSUPP;
880 }
881 
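/* ethtool -S layout: the first four counters are DSA's software per-port
 * stats (tx_packets, tx_bytes, rx_packets, rx_bytes, accumulated from
 * dev->tstats); driver-provided counters follow at offset 4.  get_strings,
 * get_ethtool_stats and get_sset_count below must agree on this layout.
 */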
882 static void dsa_slave_get_strings(struct net_device *dev,
883 				  uint32_t stringset, uint8_t *data)
884 {
885 	struct dsa_port *dp = dsa_slave_to_port(dev);
886 	struct dsa_switch *ds = dp->ds;
887 
888 	if (stringset == ETH_SS_STATS) {
889 		int len = ETH_GSTRING_LEN;
890 
891 		strncpy(data, "tx_packets", len);
892 		strncpy(data + len, "tx_bytes", len);
893 		strncpy(data + 2 * len, "rx_packets", len);
894 		strncpy(data + 3 * len, "rx_bytes", len);
895 		if (ds->ops->get_strings)
896 			ds->ops->get_strings(ds, dp->index, stringset,
897 					     data + 4 * len);
898 	} else if (stringset == ETH_SS_TEST) {
899 		net_selftest_get_strings(data);
900 	}
901 
902 }
903 
904 static void dsa_slave_get_ethtool_stats(struct net_device *dev,
905 					struct ethtool_stats *stats,
906 					uint64_t *data)
907 {
908 	struct dsa_port *dp = dsa_slave_to_port(dev);
909 	struct dsa_switch *ds = dp->ds;
910 	struct pcpu_sw_netstats *s;
911 	unsigned int start;
912 	int i;
913 
914 	for_each_possible_cpu(i) {
915 		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
916 
917 		s = per_cpu_ptr(dev->tstats, i);
918 		do {
919 			start = u64_stats_fetch_begin_irq(&s->syncp);
920 			tx_packets = s->tx_packets;
921 			tx_bytes = s->tx_bytes;
922 			rx_packets = s->rx_packets;
923 			rx_bytes = s->rx_bytes;
924 		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
925 		data[0] += tx_packets;
926 		data[1] += tx_bytes;
927 		data[2] += rx_packets;
928 		data[3] += rx_bytes;
929 	}
930 	if (ds->ops->get_ethtool_stats)
931 		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
932 }
933 
934 static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
935 {
936 	struct dsa_port *dp = dsa_slave_to_port(dev);
937 	struct dsa_switch *ds = dp->ds;
938 
939 	if (sset == ETH_SS_STATS) {
940 		int count = 0;
941 
942 		if (ds->ops->get_sset_count) {
943 			count = ds->ops->get_sset_count(ds, dp->index, sset);
944 			if (count < 0)
945 				return count;
946 		}
947 
948 		return count + 4;
949 	} else if (sset == ETH_SS_TEST) {
950 		return net_selftest_get_count();
951 	}
952 
953 	return -EOPNOTSUPP;
954 }
955 
956 static void dsa_slave_get_eth_phy_stats(struct net_device *dev,
957 					struct ethtool_eth_phy_stats *phy_stats)
958 {
959 	struct dsa_port *dp = dsa_slave_to_port(dev);
960 	struct dsa_switch *ds = dp->ds;
961 
962 	if (ds->ops->get_eth_phy_stats)
963 		ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
964 }
965 
966 static void dsa_slave_get_eth_mac_stats(struct net_device *dev,
967 					struct ethtool_eth_mac_stats *mac_stats)
968 {
969 	struct dsa_port *dp = dsa_slave_to_port(dev);
970 	struct dsa_switch *ds = dp->ds;
971 
972 	if (ds->ops->get_eth_mac_stats)
973 		ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats);
974 }
975 
976 static void
977 dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
978 			     struct ethtool_eth_ctrl_stats *ctrl_stats)
979 {
980 	struct dsa_port *dp = dsa_slave_to_port(dev);
981 	struct dsa_switch *ds = dp->ds;
982 
983 	if (ds->ops->get_eth_ctrl_stats)
984 		ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
985 }
986 
987 static void dsa_slave_net_selftest(struct net_device *ndev,
988 				   struct ethtool_test *etest, u64 *buf)
989 {
990 	struct dsa_port *dp = dsa_slave_to_port(ndev);
991 	struct dsa_switch *ds = dp->ds;
992 
993 	if (ds->ops->self_test) {
994 		ds->ops->self_test(ds, dp->index, etest, buf);
995 		return;
996 	}
997 
998 	net_selftest(ndev, etest, buf);
999 }
1000 
1001 static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
1002 {
1003 	struct dsa_port *dp = dsa_slave_to_port(dev);
1004 	struct dsa_switch *ds = dp->ds;
1005 
1006 	phylink_ethtool_get_wol(dp->pl, w);
1007 
1008 	if (ds->ops->get_wol)
1009 		ds->ops->get_wol(ds, dp->index, w);
1010 }
1011 
1012 static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
1013 {
1014 	struct dsa_port *dp = dsa_slave_to_port(dev);
1015 	struct dsa_switch *ds = dp->ds;
1016 	int ret = -EOPNOTSUPP;
1017 
1018 	phylink_ethtool_set_wol(dp->pl, w);
1019 
1020 	if (ds->ops->set_wol)
1021 		ret = ds->ops->set_wol(ds, dp->index, w);
1022 
1023 	return ret;
1024 }
1025 
1026 static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
1027 {
1028 	struct dsa_port *dp = dsa_slave_to_port(dev);
1029 	struct dsa_switch *ds = dp->ds;
1030 	int ret;
1031 
1032 	/* Port's PHY and MAC both need to be EEE capable */
1033 	if (!dev->phydev || !dp->pl)
1034 		return -ENODEV;
1035 
1036 	if (!ds->ops->set_mac_eee)
1037 		return -EOPNOTSUPP;
1038 
1039 	ret = ds->ops->set_mac_eee(ds, dp->index, e);
1040 	if (ret)
1041 		return ret;
1042 
1043 	return phylink_ethtool_set_eee(dp->pl, e);
1044 }
1045 
1046 static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
1047 {
1048 	struct dsa_port *dp = dsa_slave_to_port(dev);
1049 	struct dsa_switch *ds = dp->ds;
1050 	int ret;
1051 
1052 	/* Port's PHY and MAC both need to be EEE capable */
1053 	if (!dev->phydev || !dp->pl)
1054 		return -ENODEV;
1055 
1056 	if (!ds->ops->get_mac_eee)
1057 		return -EOPNOTSUPP;
1058 
1059 	ret = ds->ops->get_mac_eee(ds, dp->index, e);
1060 	if (ret)
1061 		return ret;
1062 
1063 	return phylink_ethtool_get_eee(dp->pl, e);
1064 }
1065 
1066 static int dsa_slave_get_link_ksettings(struct net_device *dev,
1067 					struct ethtool_link_ksettings *cmd)
1068 {
1069 	struct dsa_port *dp = dsa_slave_to_port(dev);
1070 
1071 	return phylink_ethtool_ksettings_get(dp->pl, cmd);
1072 }
1073 
1074 static int dsa_slave_set_link_ksettings(struct net_device *dev,
1075 					const struct ethtool_link_ksettings *cmd)
1076 {
1077 	struct dsa_port *dp = dsa_slave_to_port(dev);
1078 
1079 	return phylink_ethtool_ksettings_set(dp->pl, cmd);
1080 }
1081 
1082 static void dsa_slave_get_pauseparam(struct net_device *dev,
1083 				     struct ethtool_pauseparam *pause)
1084 {
1085 	struct dsa_port *dp = dsa_slave_to_port(dev);
1086 
1087 	phylink_ethtool_get_pauseparam(dp->pl, pause);
1088 }
1089 
1090 static int dsa_slave_set_pauseparam(struct net_device *dev,
1091 				    struct ethtool_pauseparam *pause)
1092 {
1093 	struct dsa_port *dp = dsa_slave_to_port(dev);
1094 
1095 	return phylink_ethtool_set_pauseparam(dp->pl, pause);
1096 }
1097 
1098 #ifdef CONFIG_NET_POLL_CONTROLLER
1099 static int dsa_slave_netpoll_setup(struct net_device *dev,
1100 				   struct netpoll_info *ni)
1101 {
1102 	struct net_device *master = dsa_slave_to_master(dev);
1103 	struct dsa_slave_priv *p = netdev_priv(dev);
1104 	struct netpoll *netpoll;
1105 	int err = 0;
1106 
1107 	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
1108 	if (!netpoll)
1109 		return -ENOMEM;
1110 
1111 	err = __netpoll_setup(netpoll, master);
1112 	if (err) {
1113 		kfree(netpoll);
1114 		goto out;
1115 	}
1116 
1117 	p->netpoll = netpoll;
1118 out:
1119 	return err;
1120 }
1121 
1122 static void dsa_slave_netpoll_cleanup(struct net_device *dev)
1123 {
1124 	struct dsa_slave_priv *p = netdev_priv(dev);
1125 	struct netpoll *netpoll = p->netpoll;
1126 
1127 	if (!netpoll)
1128 		return;
1129 
1130 	p->netpoll = NULL;
1131 
1132 	__netpoll_free(netpoll);
1133 }
1134 
1135 static void dsa_slave_poll_controller(struct net_device *dev)
1136 {
1137 }
1138 #endif
1139 
1140 static struct dsa_mall_tc_entry *
1141 dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
1142 {
1143 	struct dsa_slave_priv *p = netdev_priv(dev);
1144 	struct dsa_mall_tc_entry *mall_tc_entry;
1145 
1146 	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
1147 		if (mall_tc_entry->cookie == cookie)
1148 			return mall_tc_entry;
1149 
1150 	return NULL;
1151 }
1152 
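/* Offload a matchall mirred (port mirroring) rule to the switch, as set up
 * by e.g. (port names hypothetical):
 *
 *   tc qdisc add dev swp0 clsact
 *   tc filter add dev swp0 ingress matchall skip_sw \
 *           action mirred egress mirror dev swp1
 */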
1153 static int
1154 dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
1155 				  struct tc_cls_matchall_offload *cls,
1156 				  bool ingress)
1157 {
1158 	struct dsa_port *dp = dsa_slave_to_port(dev);
1159 	struct dsa_slave_priv *p = netdev_priv(dev);
1160 	struct dsa_mall_mirror_tc_entry *mirror;
1161 	struct dsa_mall_tc_entry *mall_tc_entry;
1162 	struct dsa_switch *ds = dp->ds;
1163 	struct flow_action_entry *act;
1164 	struct dsa_port *to_dp;
1165 	int err;
1166 
1167 	if (!ds->ops->port_mirror_add)
1168 		return -EOPNOTSUPP;
1169 
1170 	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
1171 					      cls->common.extack))
1172 		return -EOPNOTSUPP;
1173 
1174 	act = &cls->rule->action.entries[0];
1175 
1176 	if (!act->dev)
1177 		return -EINVAL;
1178 
1179 	if (!dsa_slave_dev_check(act->dev))
1180 		return -EOPNOTSUPP;
1181 
1182 	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1183 	if (!mall_tc_entry)
1184 		return -ENOMEM;
1185 
1186 	mall_tc_entry->cookie = cls->cookie;
1187 	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
1188 	mirror = &mall_tc_entry->mirror;
1189 
1190 	to_dp = dsa_slave_to_port(act->dev);
1191 
1192 	mirror->to_local_port = to_dp->index;
1193 	mirror->ingress = ingress;
1194 
1195 	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
1196 	if (err) {
1197 		kfree(mall_tc_entry);
1198 		return err;
1199 	}
1200 
1201 	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
1202 
1203 	return err;
1204 }
1205 
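/* Offload a matchall police (port policer) rule, as set up by e.g. (port
 * name hypothetical):
 *
 *   tc filter add dev swp0 ingress matchall skip_sw \
 *           action police rate 100mbit burst 64k
 *
 * Only a single ingress policer per port is supported.
 */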
1206 static int
1207 dsa_slave_add_cls_matchall_police(struct net_device *dev,
1208 				  struct tc_cls_matchall_offload *cls,
1209 				  bool ingress)
1210 {
1211 	struct netlink_ext_ack *extack = cls->common.extack;
1212 	struct dsa_port *dp = dsa_slave_to_port(dev);
1213 	struct dsa_slave_priv *p = netdev_priv(dev);
1214 	struct dsa_mall_policer_tc_entry *policer;
1215 	struct dsa_mall_tc_entry *mall_tc_entry;
1216 	struct dsa_switch *ds = dp->ds;
1217 	struct flow_action_entry *act;
1218 	int err;
1219 
1220 	if (!ds->ops->port_policer_add) {
1221 		NL_SET_ERR_MSG_MOD(extack,
1222 				   "Policing offload not implemented");
1223 		return -EOPNOTSUPP;
1224 	}
1225 
1226 	if (!ingress) {
1227 		NL_SET_ERR_MSG_MOD(extack,
1228 				   "Only supported on ingress qdisc");
1229 		return -EOPNOTSUPP;
1230 	}
1231 
1232 	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
1233 					      cls->common.extack))
1234 		return -EOPNOTSUPP;
1235 
1236 	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
1237 		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
1238 			NL_SET_ERR_MSG_MOD(extack,
1239 					   "Only one port policer allowed");
1240 			return -EEXIST;
1241 		}
1242 	}
1243 
1244 	act = &cls->rule->action.entries[0];
1245 
1246 	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1247 	if (!mall_tc_entry)
1248 		return -ENOMEM;
1249 
1250 	mall_tc_entry->cookie = cls->cookie;
1251 	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
1252 	policer = &mall_tc_entry->policer;
1253 	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
1254 	policer->burst = act->police.burst;
1255 
1256 	err = ds->ops->port_policer_add(ds, dp->index, policer);
1257 	if (err) {
1258 		kfree(mall_tc_entry);
1259 		return err;
1260 	}
1261 
1262 	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
1263 
1264 	return err;
1265 }
1266 
1267 static int dsa_slave_add_cls_matchall(struct net_device *dev,
1268 				      struct tc_cls_matchall_offload *cls,
1269 				      bool ingress)
1270 {
1271 	int err = -EOPNOTSUPP;
1272 
1273 	if (cls->common.protocol == htons(ETH_P_ALL) &&
1274 	    flow_offload_has_one_action(&cls->rule->action) &&
1275 	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
1276 		err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
1277 	else if (flow_offload_has_one_action(&cls->rule->action) &&
1278 		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
1279 		err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);
1280 
1281 	return err;
1282 }
1283 
1284 static void dsa_slave_del_cls_matchall(struct net_device *dev,
1285 				       struct tc_cls_matchall_offload *cls)
1286 {
1287 	struct dsa_port *dp = dsa_slave_to_port(dev);
1288 	struct dsa_mall_tc_entry *mall_tc_entry;
1289 	struct dsa_switch *ds = dp->ds;
1290 
1291 	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
1292 	if (!mall_tc_entry)
1293 		return;
1294 
1295 	list_del(&mall_tc_entry->list);
1296 
1297 	switch (mall_tc_entry->type) {
1298 	case DSA_PORT_MALL_MIRROR:
1299 		if (ds->ops->port_mirror_del)
1300 			ds->ops->port_mirror_del(ds, dp->index,
1301 						 &mall_tc_entry->mirror);
1302 		break;
1303 	case DSA_PORT_MALL_POLICER:
1304 		if (ds->ops->port_policer_del)
1305 			ds->ops->port_policer_del(ds, dp->index);
1306 		break;
1307 	default:
1308 		WARN_ON(1);
1309 	}
1310 
1311 	kfree(mall_tc_entry);
1312 }
1313 
1314 static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
1315 					   struct tc_cls_matchall_offload *cls,
1316 					   bool ingress)
1317 {
1318 	if (cls->common.chain_index)
1319 		return -EOPNOTSUPP;
1320 
1321 	switch (cls->command) {
1322 	case TC_CLSMATCHALL_REPLACE:
1323 		return dsa_slave_add_cls_matchall(dev, cls, ingress);
1324 	case TC_CLSMATCHALL_DESTROY:
1325 		dsa_slave_del_cls_matchall(dev, cls);
1326 		return 0;
1327 	default:
1328 		return -EOPNOTSUPP;
1329 	}
1330 }
1331 
1332 static int dsa_slave_add_cls_flower(struct net_device *dev,
1333 				    struct flow_cls_offload *cls,
1334 				    bool ingress)
1335 {
1336 	struct dsa_port *dp = dsa_slave_to_port(dev);
1337 	struct dsa_switch *ds = dp->ds;
1338 	int port = dp->index;
1339 
1340 	if (!ds->ops->cls_flower_add)
1341 		return -EOPNOTSUPP;
1342 
1343 	return ds->ops->cls_flower_add(ds, port, cls, ingress);
1344 }
1345 
1346 static int dsa_slave_del_cls_flower(struct net_device *dev,
1347 				    struct flow_cls_offload *cls,
1348 				    bool ingress)
1349 {
1350 	struct dsa_port *dp = dsa_slave_to_port(dev);
1351 	struct dsa_switch *ds = dp->ds;
1352 	int port = dp->index;
1353 
1354 	if (!ds->ops->cls_flower_del)
1355 		return -EOPNOTSUPP;
1356 
1357 	return ds->ops->cls_flower_del(ds, port, cls, ingress);
1358 }
1359 
1360 static int dsa_slave_stats_cls_flower(struct net_device *dev,
1361 				      struct flow_cls_offload *cls,
1362 				      bool ingress)
1363 {
1364 	struct dsa_port *dp = dsa_slave_to_port(dev);
1365 	struct dsa_switch *ds = dp->ds;
1366 	int port = dp->index;
1367 
1368 	if (!ds->ops->cls_flower_stats)
1369 		return -EOPNOTSUPP;
1370 
1371 	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
1372 }
1373 
1374 static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
1375 					 struct flow_cls_offload *cls,
1376 					 bool ingress)
1377 {
1378 	switch (cls->command) {
1379 	case FLOW_CLS_REPLACE:
1380 		return dsa_slave_add_cls_flower(dev, cls, ingress);
1381 	case FLOW_CLS_DESTROY:
1382 		return dsa_slave_del_cls_flower(dev, cls, ingress);
1383 	case FLOW_CLS_STATS:
1384 		return dsa_slave_stats_cls_flower(dev, cls, ingress);
1385 	default:
1386 		return -EOPNOTSUPP;
1387 	}
1388 }
1389 
1390 static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1391 				       void *cb_priv, bool ingress)
1392 {
1393 	struct net_device *dev = cb_priv;
1394 
1395 	if (!tc_can_offload(dev))
1396 		return -EOPNOTSUPP;
1397 
1398 	switch (type) {
1399 	case TC_SETUP_CLSMATCHALL:
1400 		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
1401 	case TC_SETUP_CLSFLOWER:
1402 		return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
1403 	default:
1404 		return -EOPNOTSUPP;
1405 	}
1406 }
1407 
1408 static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
1409 					  void *type_data, void *cb_priv)
1410 {
1411 	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
1412 }
1413 
1414 static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
1415 					  void *type_data, void *cb_priv)
1416 {
1417 	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
1418 }
1419 
1420 static LIST_HEAD(dsa_slave_block_cb_list);
1421 
1422 static int dsa_slave_setup_tc_block(struct net_device *dev,
1423 				    struct flow_block_offload *f)
1424 {
1425 	struct flow_block_cb *block_cb;
1426 	flow_setup_cb_t *cb;
1427 
1428 	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1429 		cb = dsa_slave_setup_tc_block_cb_ig;
1430 	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1431 		cb = dsa_slave_setup_tc_block_cb_eg;
1432 	else
1433 		return -EOPNOTSUPP;
1434 
1435 	f->driver_block_list = &dsa_slave_block_cb_list;
1436 
1437 	switch (f->command) {
1438 	case FLOW_BLOCK_BIND:
1439 		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
1440 			return -EBUSY;
1441 
1442 		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
1443 		if (IS_ERR(block_cb))
1444 			return PTR_ERR(block_cb);
1445 
1446 		flow_block_cb_add(block_cb, f);
1447 		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
1448 		return 0;
1449 	case FLOW_BLOCK_UNBIND:
1450 		block_cb = flow_block_cb_lookup(f->block, cb, dev);
1451 		if (!block_cb)
1452 			return -ENOENT;
1453 
1454 		flow_block_cb_remove(block_cb, f);
1455 		list_del(&block_cb->driver_list);
1456 		return 0;
1457 	default:
1458 		return -EOPNOTSUPP;
1459 	}
1460 }
1461 
1462 static int dsa_slave_setup_ft_block(struct dsa_switch *ds, int port,
1463 				    void *type_data)
1464 {
1465 	struct dsa_port *cpu_dp = dsa_to_port(ds, port)->cpu_dp;
1466 	struct net_device *master = cpu_dp->master;
1467 
1468 	if (!master->netdev_ops->ndo_setup_tc)
1469 		return -EOPNOTSUPP;
1470 
1471 	return master->netdev_ops->ndo_setup_tc(master, TC_SETUP_FT, type_data);
1472 }
1473 
1474 static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
1475 			      void *type_data)
1476 {
1477 	struct dsa_port *dp = dsa_slave_to_port(dev);
1478 	struct dsa_switch *ds = dp->ds;
1479 
1480 	switch (type) {
1481 	case TC_SETUP_BLOCK:
1482 		return dsa_slave_setup_tc_block(dev, type_data);
1483 	case TC_SETUP_FT:
1484 		return dsa_slave_setup_ft_block(ds, dp->index, type_data);
1485 	default:
1486 		break;
1487 	}
1488 
1489 	if (!ds->ops->port_setup_tc)
1490 		return -EOPNOTSUPP;
1491 
1492 	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
1493 }
1494 
1495 static int dsa_slave_get_rxnfc(struct net_device *dev,
1496 			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
1497 {
1498 	struct dsa_port *dp = dsa_slave_to_port(dev);
1499 	struct dsa_switch *ds = dp->ds;
1500 
1501 	if (!ds->ops->get_rxnfc)
1502 		return -EOPNOTSUPP;
1503 
1504 	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
1505 }
1506 
1507 static int dsa_slave_set_rxnfc(struct net_device *dev,
1508 			       struct ethtool_rxnfc *nfc)
1509 {
1510 	struct dsa_port *dp = dsa_slave_to_port(dev);
1511 	struct dsa_switch *ds = dp->ds;
1512 
1513 	if (!ds->ops->set_rxnfc)
1514 		return -EOPNOTSUPP;
1515 
1516 	return ds->ops->set_rxnfc(ds, dp->index, nfc);
1517 }
1518 
1519 static int dsa_slave_get_ts_info(struct net_device *dev,
1520 				 struct ethtool_ts_info *ts)
1521 {
1522 	struct dsa_slave_priv *p = netdev_priv(dev);
1523 	struct dsa_switch *ds = p->dp->ds;
1524 
1525 	if (!ds->ops->get_ts_info)
1526 		return -EOPNOTSUPP;
1527 
1528 	return ds->ops->get_ts_info(ds, p->dp->index, ts);
1529 }
1530 
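/* ndo_vlan_rx_add_vid, invoked through NETIF_F_HW_VLAN_CTAG_FILTER when an
 * 802.1Q upper is created, e.g. (names hypothetical):
 *
 *   ip link add link swp0 name swp0.100 type vlan id 100
 *
 * The VID must be installed both on the user port and towards the CPU port
 * for the traffic to flow.
 */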
1531 static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
1532 				     u16 vid)
1533 {
1534 	struct dsa_port *dp = dsa_slave_to_port(dev);
1535 	struct switchdev_obj_port_vlan vlan = {
1536 		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
1537 		.vid = vid,
1538 		/* This API only allows programming tagged, non-PVID VIDs */
1539 		.flags = 0,
1540 	};
1541 	struct netlink_ext_ack extack = {0};
1542 	int ret;
1543 
1544 	/* User port... */
1545 	ret = dsa_port_vlan_add(dp, &vlan, &extack);
1546 	if (ret) {
1547 		if (extack._msg)
1548 			netdev_err(dev, "%s\n", extack._msg);
1549 		return ret;
1550 	}
1551 
1552 	/* And CPU port... */
1553 	ret = dsa_port_host_vlan_add(dp, &vlan, &extack);
1554 	if (ret) {
1555 		if (extack._msg)
1556 			netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
1557 				   extack._msg);
1558 		return ret;
1559 	}
1560 
1561 	return 0;
1562 }
1563 
1564 static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
1565 				      u16 vid)
1566 {
1567 	struct dsa_port *dp = dsa_slave_to_port(dev);
1568 	struct switchdev_obj_port_vlan vlan = {
1569 		.vid = vid,
1570 		/* This API only allows programming tagged, non-PVID VIDs */
1571 		.flags = 0,
1572 	};
1573 	int err;
1574 
1575 	err = dsa_port_vlan_del(dp, &vlan);
1576 	if (err)
1577 		return err;
1578 
1579 	return dsa_port_host_vlan_del(dp, &vlan);
1580 }
1581 
1582 static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
1583 {
1584 	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
1585 
1586 	return dsa_slave_vlan_rx_add_vid(arg, proto, vid);
1587 }
1588 
1589 static int dsa_slave_clear_vlan(struct net_device *vdev, int vid, void *arg)
1590 {
1591 	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
1592 
1593 	return dsa_slave_vlan_rx_kill_vid(arg, proto, vid);
1594 }
1595 
1596 /* Keep the VLAN RX filtering list in sync with the hardware only if VLAN
1597  * filtering is enabled. The baseline is that only ports that offload a
1598  * VLAN-aware bridge are VLAN-aware, and standalone ports are VLAN-unaware,
1599  * but there are exceptions for quirky hardware.
1600  *
1601  * If ds->vlan_filtering_is_global = true, then standalone ports which share
1602  * the same switch with other ports that offload a VLAN-aware bridge are also
1603  * inevitably VLAN-aware.
1604  *
1605  * To summarize, a DSA switch port offloads:
1606  *
1607  * - If standalone (this includes software bridge, software LAG):
1608  *     - if ds->needs_standalone_vlan_filtering = true, OR if
1609  *       (ds->vlan_filtering_is_global = true AND there are bridges spanning
1610  *       this switch chip which have vlan_filtering=1)
1611  *         - the 8021q upper VLANs
1612  *     - else (standalone VLAN filtering is not needed, VLAN filtering is not
1613  *       global, or it is, but no port is under a VLAN-aware bridge):
1614  *         - no VLAN (any 8021q upper is a software VLAN)
1615  *
1616  * - If under a vlan_filtering=0 bridge which it offloads:
1617  *     - if ds->configure_vlan_while_not_filtering = true (default):
1618  *         - the bridge VLANs. These VLANs are committed to hardware but inactive.
1619  *     - else (deprecated):
1620  *         - no VLAN. The bridge VLANs are not restored when VLAN awareness is
1621  *           enabled, so this behavior is broken and discouraged.
1622  *
1623  * - If under a vlan_filtering=1 bridge which it offloads:
1624  *     - the bridge VLANs
1625  *     - the 8021q upper VLANs
1626  */
1627 int dsa_slave_manage_vlan_filtering(struct net_device *slave,
1628 				    bool vlan_filtering)
1629 {
1630 	int err;
1631 
1632 	if (vlan_filtering) {
1633 		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1634 
1635 		err = vlan_for_each(slave, dsa_slave_restore_vlan, slave);
1636 		if (err) {
1637 			vlan_for_each(slave, dsa_slave_clear_vlan, slave);
1638 			slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
1639 			return err;
1640 		}
1641 	} else {
1642 		err = vlan_for_each(slave, dsa_slave_clear_vlan, slave);
1643 		if (err)
1644 			return err;
1645 
1646 		slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
1647 	}
1648 
1649 	return 0;
1650 }
1651 
1652 struct dsa_hw_port {
1653 	struct list_head list;
1654 	struct net_device *dev;
1655 	int old_mtu;
1656 };
1657 
1658 static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
1659 {
1660 	const struct dsa_hw_port *p;
1661 	int err;
1662 
1663 	list_for_each_entry(p, hw_port_list, list) {
1664 		if (p->dev->mtu == mtu)
1665 			continue;
1666 
1667 		err = dev_set_mtu(p->dev, mtu);
1668 		if (err)
1669 			goto rollback;
1670 	}
1671 
1672 	return 0;
1673 
1674 rollback:
1675 	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
1676 		if (p->dev->mtu == p->old_mtu)
1677 			continue;
1678 
1679 		if (dev_set_mtu(p->dev, p->old_mtu))
1680 			netdev_err(p->dev, "Failed to restore MTU\n");
1681 	}
1682 
1683 	return err;
1684 }
1685 
1686 static void dsa_hw_port_list_free(struct list_head *hw_port_list)
1687 {
1688 	struct dsa_hw_port *p, *n;
1689 
1690 	list_for_each_entry_safe(p, n, hw_port_list, list)
1691 		kfree(p);
1692 }
1693 
1694 /* Make the hardware datapath to/from @dev limited to a common MTU */
1695 static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
1696 {
1697 	struct list_head hw_port_list;
1698 	struct dsa_switch_tree *dst;
1699 	int min_mtu = ETH_MAX_MTU;
1700 	struct dsa_port *other_dp;
1701 	int err;
1702 
1703 	if (!dp->ds->mtu_enforcement_ingress)
1704 		return;
1705 
1706 	if (!dp->bridge)
1707 		return;
1708 
1709 	INIT_LIST_HEAD(&hw_port_list);
1710 
1711 	/* Populate the list of ports that are part of the same bridge
1712 	 * as the newly added/modified port
1713 	 */
1714 	list_for_each_entry(dst, &dsa_tree_list, list) {
1715 		list_for_each_entry(other_dp, &dst->ports, list) {
1716 			struct dsa_hw_port *hw_port;
1717 			struct net_device *slave;
1718 
1719 			if (other_dp->type != DSA_PORT_TYPE_USER)
1720 				continue;
1721 
1722 			if (!dsa_port_bridge_same(dp, other_dp))
1723 				continue;
1724 
1725 			if (!other_dp->ds->mtu_enforcement_ingress)
1726 				continue;
1727 
1728 			slave = other_dp->slave;
1729 
1730 			if (min_mtu > slave->mtu)
1731 				min_mtu = slave->mtu;
1732 
1733 			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
1734 			if (!hw_port)
1735 				goto out;
1736 
1737 			hw_port->dev = slave;
1738 			hw_port->old_mtu = slave->mtu;
1739 
1740 			list_add(&hw_port->list, &hw_port_list);
1741 		}
1742 	}
1743 
1744 	/* Attempt to configure the entire hardware bridge to the newly added
1745 	 * interface's MTU first, regardless of whether the intention of the
1746 	 * user was to raise or lower it.
1747 	 */
1748 	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
1749 	if (!err)
1750 		goto out;
1751 
1752 	/* Clearly that didn't work out so well, so just set the minimum MTU on
1753 	 * all hardware bridge ports now. If this fails too, then all ports will
1754 	 * still have their old MTU rolled back anyway.
1755 	 */
1756 	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);
1757 
1758 out:
1759 	dsa_hw_port_list_free(&hw_port_list);
1760 }
1761 
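/* The DSA master must be able to carry the largest user port MTU plus the
 * tagging protocol overhead.  E.g. with 1500-byte user ports and an 8-byte
 * EDSA tag, the master needs an MTU of at least 1508; -ERANGE is returned
 * when that exceeds what either the master or this port supports.
 */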
1762 int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
1763 {
1764 	struct net_device *master = dsa_slave_to_master(dev);
1765 	struct dsa_port *dp = dsa_slave_to_port(dev);
1766 	struct dsa_slave_priv *p = netdev_priv(dev);
1767 	struct dsa_switch *ds = p->dp->ds;
1768 	struct dsa_port *dp_iter;
1769 	struct dsa_port *cpu_dp;
1770 	int port = p->dp->index;
1771 	int largest_mtu = 0;
1772 	int new_master_mtu;
1773 	int old_master_mtu;
1774 	int mtu_limit;
1775 	int cpu_mtu;
1776 	int err;
1777 
1778 	if (!ds->ops->port_change_mtu)
1779 		return -EOPNOTSUPP;
1780 
1781 	list_for_each_entry(dp_iter, &ds->dst->ports, list) {
1782 		int slave_mtu;
1783 
1784 		if (!dsa_port_is_user(dp_iter))
1785 			continue;
1786 
1787 		/* During probe, this function will be called for each slave
1788 		 * device, while not all of them have been allocated. That's
1789 		 * ok, it doesn't change what the maximum is, so ignore it.
1790 		 */
1791 		if (!dp_iter->slave)
1792 			continue;
1793 
1794 		/* Pretend that we already applied the setting, which we
1795 		 * actually haven't (still haven't done all integrity checks)
1796 		 */
1797 		if (dp_iter == dp)
1798 			slave_mtu = new_mtu;
1799 		else
1800 			slave_mtu = dp_iter->slave->mtu;
1801 
1802 		if (largest_mtu < slave_mtu)
1803 			largest_mtu = slave_mtu;
1804 	}
1805 
1806 	cpu_dp = dsa_to_port(ds, port)->cpu_dp;
1807 
1808 	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
1809 	old_master_mtu = master->mtu;
1810 	new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
1811 	if (new_master_mtu > mtu_limit)
1812 		return -ERANGE;
1813 
1814 	/* If the master MTU isn't over limit, there's no need to check the CPU
1815 	 * MTU, since that surely isn't either.
1816 	 */
1817 	cpu_mtu = largest_mtu;
1818 
1819 	/* Start applying stuff */
1820 	if (new_master_mtu != old_master_mtu) {
1821 		err = dev_set_mtu(master, new_master_mtu);
1822 		if (err < 0)
1823 			goto out_master_failed;
1824 
1825 		/* We only need to propagate the MTU of the CPU port to
1826 		 * upstream switches, so create a non-targeted notifier which
1827 		 * updates all switches.
1828 		 */
1829 		err = dsa_port_mtu_change(cpu_dp, cpu_mtu, false);
1830 		if (err)
1831 			goto out_cpu_failed;
1832 	}
1833 
1834 	err = dsa_port_mtu_change(dp, new_mtu, true);
1835 	if (err)
1836 		goto out_port_failed;
1837 
1838 	dev->mtu = new_mtu;
1839 
1840 	dsa_bridge_mtu_normalization(dp);
1841 
1842 	return 0;
1843 
1844 out_port_failed:
1845 	if (new_master_mtu != old_master_mtu)
1846 		dsa_port_mtu_change(cpu_dp, old_master_mtu -
1847 				    dsa_tag_protocol_overhead(cpu_dp->tag_ops),
1848 				    false);
1849 out_cpu_failed:
1850 	if (new_master_mtu != old_master_mtu)
1851 		dev_set_mtu(master, old_master_mtu);
1852 out_master_failed:
1853 	return err;
1854 }
1855 
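/* Offload the port-default priority, configured via the dcb-app table with
 * an Ethertype selector and protocol 0, e.g. (syntax from the iproute2 "dcb"
 * tool, port name hypothetical):
 *
 *   dcb app replace dev swp0 default-prio 3
 *
 * When several app entries exist, the highest configured priority wins
 * (__fls on the priority mask).
 */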
1856 static int __maybe_unused
1857 dsa_slave_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
1858 {
1859 	struct dsa_port *dp = dsa_slave_to_port(dev);
1860 	struct dsa_switch *ds = dp->ds;
1861 	unsigned long mask, new_prio;
1862 	int err, port = dp->index;
1863 
1864 	if (!ds->ops->port_set_default_prio)
1865 		return -EOPNOTSUPP;
1866 
1867 	err = dcb_ieee_setapp(dev, app);
1868 	if (err)
1869 		return err;
1870 
1871 	mask = dcb_ieee_getapp_mask(dev, app);
1872 	new_prio = __fls(mask);
1873 
1874 	err = ds->ops->port_set_default_prio(ds, port, new_prio);
1875 	if (err) {
1876 		dcb_ieee_delapp(dev, app);
1877 		return err;
1878 	}
1879 
1880 	return 0;
1881 }
1882 
1883 static int __maybe_unused dsa_slave_dcbnl_ieee_setapp(struct net_device *dev,
1884 						      struct dcb_app *app)
1885 {
1886 	switch (app->selector) {
1887 	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
1888 		switch (app->protocol) {
1889 		case 0:
1890 			return dsa_slave_dcbnl_set_default_prio(dev, app);
1891 		default:
1892 			return -EOPNOTSUPP;
1893 		}
1894 		break;
1895 	default:
1896 		return -EOPNOTSUPP;
1897 	}
1898 }
1899 
1900 static int __maybe_unused
1901 dsa_slave_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
1902 {
1903 	struct dsa_port *dp = dsa_slave_to_port(dev);
1904 	struct dsa_switch *ds = dp->ds;
1905 	unsigned long mask, new_prio;
1906 	int err, port = dp->index;
1907 
1908 	if (!ds->ops->port_set_default_prio)
1909 		return -EOPNOTSUPP;
1910 
1911 	err = dcb_ieee_delapp(dev, app);
1912 	if (err)
1913 		return err;
1914 
1915 	mask = dcb_ieee_getapp_mask(dev, app);
1916 	new_prio = mask ? __fls(mask) : 0;
1917 
1918 	err = ds->ops->port_set_default_prio(ds, port, new_prio);
1919 	if (err) {
1920 		dcb_ieee_setapp(dev, app);
1921 		return err;
1922 	}
1923 
1924 	return 0;
1925 }
1926 
1927 static int __maybe_unused dsa_slave_dcbnl_ieee_delapp(struct net_device *dev,
1928 						      struct dcb_app *app)
1929 {
1930 	switch (app->selector) {
1931 	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
1932 		switch (app->protocol) {
1933 		case 0:
1934 			return dsa_slave_dcbnl_del_default_prio(dev, app);
1935 		default:
1936 			return -EOPNOTSUPP;
1937 		}
1938 		break;
1939 	default:
1940 		return -EOPNOTSUPP;
1941 	}
1942 }
1943 
1944 /* Pre-populate the DCB application priority table with the priorities
1945  * configured during switch setup, which we read from hardware here.
1946  */
1947 static int dsa_slave_dcbnl_init(struct net_device *dev)
1948 {
1949 	struct dsa_port *dp = dsa_slave_to_port(dev);
1950 	struct dsa_switch *ds = dp->ds;
1951 	int port = dp->index;
1952 	int err;
1953 
1954 	if (ds->ops->port_get_default_prio) {
1955 		int prio = ds->ops->port_get_default_prio(ds, port);
1956 		struct dcb_app app = {
1957 			.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
1958 			.protocol = 0,
1959 			.priority = prio,
1960 		};
1961 
1962 		if (prio < 0)
1963 			return prio;
1964 
1965 		err = dcb_ieee_setapp(dev, &app);
1966 		if (err)
1967 			return err;
1968 	}
1969 
1970 	return 0;
1971 }
1972 
1973 static const struct ethtool_ops dsa_slave_ethtool_ops = {
1974 	.get_drvinfo		= dsa_slave_get_drvinfo,
1975 	.get_regs_len		= dsa_slave_get_regs_len,
1976 	.get_regs		= dsa_slave_get_regs,
1977 	.nway_reset		= dsa_slave_nway_reset,
1978 	.get_link		= ethtool_op_get_link,
1979 	.get_eeprom_len		= dsa_slave_get_eeprom_len,
1980 	.get_eeprom		= dsa_slave_get_eeprom,
1981 	.set_eeprom		= dsa_slave_set_eeprom,
1982 	.get_strings		= dsa_slave_get_strings,
1983 	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
1984 	.get_sset_count		= dsa_slave_get_sset_count,
1985 	.get_eth_phy_stats	= dsa_slave_get_eth_phy_stats,
1986 	.get_eth_mac_stats	= dsa_slave_get_eth_mac_stats,
1987 	.get_eth_ctrl_stats	= dsa_slave_get_eth_ctrl_stats,
1988 	.set_wol		= dsa_slave_set_wol,
1989 	.get_wol		= dsa_slave_get_wol,
1990 	.set_eee		= dsa_slave_set_eee,
1991 	.get_eee		= dsa_slave_get_eee,
1992 	.get_link_ksettings	= dsa_slave_get_link_ksettings,
1993 	.set_link_ksettings	= dsa_slave_set_link_ksettings,
1994 	.get_pauseparam		= dsa_slave_get_pauseparam,
1995 	.set_pauseparam		= dsa_slave_set_pauseparam,
1996 	.get_rxnfc		= dsa_slave_get_rxnfc,
1997 	.set_rxnfc		= dsa_slave_set_rxnfc,
1998 	.get_ts_info		= dsa_slave_get_ts_info,
1999 	.self_test		= dsa_slave_net_selftest,
2000 };
2001 
2002 static const struct dcbnl_rtnl_ops __maybe_unused dsa_slave_dcbnl_ops = {
2003 	.ieee_setapp		= dsa_slave_dcbnl_ieee_setapp,
2004 	.ieee_delapp		= dsa_slave_dcbnl_ieee_delapp,
2005 };
2006 
2007 static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
2008 {
2009 	struct dsa_port *dp = dsa_slave_to_port(dev);
2010 
2011 	return &dp->devlink_port;
2012 }
2013 
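/* Prefer hardware counters kept by the switch driver; fall back to the
 * software per-cpu tstats otherwise.
 */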
2014 static void dsa_slave_get_stats64(struct net_device *dev,
2015 				  struct rtnl_link_stats64 *s)
2016 {
2017 	struct dsa_port *dp = dsa_slave_to_port(dev);
2018 	struct dsa_switch *ds = dp->ds;
2019 
2020 	if (ds->ops->get_stats64)
2021 		ds->ops->get_stats64(ds, dp->index, s);
2022 	else
2023 		dev_get_tstats64(dev, s);
2024 }
2025 
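/* Describe the DSA hop during netdev path resolution (e.g. for flowtable
 * offload): record the tagging protocol and port index, then continue
 * resolving from the CPU port's master interface.
 */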
2026 static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx,
2027 				       struct net_device_path *path)
2028 {
2029 	struct dsa_port *dp = dsa_slave_to_port(ctx->dev);
2030 	struct dsa_port *cpu_dp = dp->cpu_dp;
2031 
2032 	path->dev = ctx->dev;
2033 	path->type = DEV_PATH_DSA;
2034 	path->dsa.proto = cpu_dp->tag_ops->proto;
2035 	path->dsa.port = dp->index;
2036 	ctx->dev = cpu_dp->master;
2037 
2038 	return 0;
2039 }
2040 
2041 static const struct net_device_ops dsa_slave_netdev_ops = {
2042 	.ndo_open		= dsa_slave_open,
2043 	.ndo_stop		= dsa_slave_close,
2044 	.ndo_start_xmit		= dsa_slave_xmit,
2045 	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
2046 	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
2047 	.ndo_set_mac_address	= dsa_slave_set_mac_address,
2048 	.ndo_fdb_dump		= dsa_slave_fdb_dump,
2049 	.ndo_eth_ioctl		= dsa_slave_ioctl,
2050 	.ndo_get_iflink		= dsa_slave_get_iflink,
2051 #ifdef CONFIG_NET_POLL_CONTROLLER
2052 	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
2053 	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
2054 	.ndo_poll_controller	= dsa_slave_poll_controller,
2055 #endif
2056 	.ndo_setup_tc		= dsa_slave_setup_tc,
2057 	.ndo_get_stats64	= dsa_slave_get_stats64,
2058 	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
2059 	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
2060 	.ndo_get_devlink_port	= dsa_slave_get_devlink_port,
2061 	.ndo_change_mtu		= dsa_slave_change_mtu,
2062 	.ndo_fill_forward_path	= dsa_slave_fill_forward_path,
2063 };
2064 
2065 static struct device_type dsa_type = {
2066 	.name	= "dsa",
2067 };
2068 
2069 void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
2070 {
2071 	const struct dsa_port *dp = dsa_to_port(ds, port);
2072 
2073 	if (dp->pl)
2074 		phylink_mac_change(dp->pl, up);
2075 }
2076 EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
2077 
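/* phylink callback which lets the switch driver report the current state
 * of a fixed link, for hardware that can only determine it at runtime.
 */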
2078 static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
2079 					  struct phylink_link_state *state)
2080 {
2081 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
2082 	struct dsa_switch *ds = dp->ds;
2083 
2084 	/* No need to check that this operation is valid, the callback would
2085 	 * not be called if it were not.
2086 	 */
2087 	ds->ops->phylink_fixed_state(ds, dp->index, state);
2088 }
2089 
2090 /* slave device setup *******************************************************/
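/* Connect to a PHY sitting on the switch's internal MDIO bus, at the
 * given address. Used as a fallback when the device tree does not
 * describe a PHY for the port.
 */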
2091 static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr,
2092 				 u32 flags)
2093 {
2094 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2095 	struct dsa_switch *ds = dp->ds;
2096 
2097 	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
2098 	if (!slave_dev->phydev) {
2099 		netdev_err(slave_dev, "no phy at %d\n", addr);
2100 		return -ENODEV;
2101 	}
2102 
2103 	slave_dev->phydev->dev_flags |= flags;
2104 
2105 	return phylink_connect_phy(dp->pl, slave_dev->phydev);
2106 }
2107 
2108 static int dsa_slave_phy_setup(struct net_device *slave_dev)
2109 {
2110 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2111 	struct device_node *port_dn = dp->dn;
2112 	struct dsa_switch *ds = dp->ds;
2113 	u32 phy_flags = 0;
2114 	int ret;
2115 
2116 	dp->pl_config.dev = &slave_dev->dev;
2117 	dp->pl_config.type = PHYLINK_NETDEV;
2118 
2119 	/* The get_fixed_state callback takes precedence over polling the
2120 	 * link GPIO in PHYLINK (see phylink_get_fixed_state).  Only set
2121 	 * this if the switch provides such a callback.
2122 	 */
2123 	if (ds->ops->phylink_fixed_state) {
2124 		dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
2125 		dp->pl_config.poll_fixed_state = true;
2126 	}
2127 
2128 	ret = dsa_port_phylink_create(dp);
2129 	if (ret)
2130 		return ret;
2131 
2132 	if (ds->ops->get_phy_flags)
2133 		phy_flags = ds->ops->get_phy_flags(ds, dp->index);
2134 
2135 	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
2136 	if (ret == -ENODEV && ds->slave_mii_bus) {
2137 		/* We could not connect to a designated PHY or SFP, so try to
2138 		 * use the switch internal MDIO bus instead
2139 		 */
2140 		ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
2141 	}
2142 	if (ret) {
2143 		netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
2144 			   ERR_PTR(ret));
2145 		phylink_destroy(dp->pl);
2146 	}
2147 
2148 	return ret;
2149 }
2150 
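/* Set up the headroom, tailroom and feature flags that the tagging
 * protocol and the master interface impose on the slave net_device, so
 * that the tagger can usually insert its frame tag without reallocating
 * the skb.
 */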
2151 void dsa_slave_setup_tagger(struct net_device *slave)
2152 {
2153 	struct dsa_port *dp = dsa_slave_to_port(slave);
2154 	struct dsa_slave_priv *p = netdev_priv(slave);
2155 	const struct dsa_port *cpu_dp = dp->cpu_dp;
2156 	struct net_device *master = cpu_dp->master;
2157 	const struct dsa_switch *ds = dp->ds;
2158 
2159 	slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
2160 	slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
2161 	/* Try to save one extra realloc later in the TX path (in the master)
2162 	 * by also inheriting the master's needed headroom and tailroom.
2163 	 * The 8021q driver also does this.
2164 	 */
2165 	slave->needed_headroom += master->needed_headroom;
2166 	slave->needed_tailroom += master->needed_tailroom;
2167 
2168 	p->xmit = cpu_dp->tag_ops->xmit;
2169 
2170 	slave->features = master->vlan_features | NETIF_F_HW_TC;
2171 	slave->hw_features |= NETIF_F_HW_TC;
2172 	slave->features |= NETIF_F_LLTX;
2173 	if (slave->needed_tailroom)
2174 		slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
2175 	if (ds->needs_standalone_vlan_filtering)
2176 		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2177 }
2178 
2179 int dsa_slave_suspend(struct net_device *slave_dev)
2180 {
2181 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2182 
2183 	if (!netif_running(slave_dev))
2184 		return 0;
2185 
2186 	netif_device_detach(slave_dev);
2187 
2188 	rtnl_lock();
2189 	phylink_stop(dp->pl);
2190 	rtnl_unlock();
2191 
2192 	return 0;
2193 }
2194 
2195 int dsa_slave_resume(struct net_device *slave_dev)
2196 {
2197 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2198 
2199 	if (!netif_running(slave_dev))
2200 		return 0;
2201 
2202 	netif_device_attach(slave_dev);
2203 
2204 	rtnl_lock();
2205 	phylink_start(dp->pl);
2206 	rtnl_unlock();
2207 
2208 	return 0;
2209 }
2210 
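/* Allocate, configure and register the slave net_device for a user port:
 * inherit the MAC address from the master unless one was provided for the
 * port, connect the PHY, apply a default MTU of ETH_DATA_LEN, and link
 * the slave as an upper device of the master.
 */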
2211 int dsa_slave_create(struct dsa_port *port)
2212 {
2213 	const struct dsa_port *cpu_dp = port->cpu_dp;
2214 	struct net_device *master = cpu_dp->master;
2215 	struct dsa_switch *ds = port->ds;
2216 	const char *name = port->name;
2217 	struct net_device *slave_dev;
2218 	struct dsa_slave_priv *p;
2219 	int ret;
2220 
2221 	if (!ds->num_tx_queues)
2222 		ds->num_tx_queues = 1;
2223 
2224 	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
2225 				     NET_NAME_UNKNOWN, ether_setup,
2226 				     ds->num_tx_queues, 1);
2227 	if (!slave_dev)
2228 		return -ENOMEM;
2229 
2230 	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
2231 #if IS_ENABLED(CONFIG_DCB)
2232 	slave_dev->dcbnl_ops = &dsa_slave_dcbnl_ops;
2233 #endif
2234 	if (!is_zero_ether_addr(port->mac))
2235 		eth_hw_addr_set(slave_dev, port->mac);
2236 	else
2237 		eth_hw_addr_inherit(slave_dev, master);
2238 	slave_dev->priv_flags |= IFF_NO_QUEUE;
2239 	if (dsa_switch_supports_uc_filtering(ds))
2240 		slave_dev->priv_flags |= IFF_UNICAST_FLT;
2241 	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
2242 	if (ds->ops->port_max_mtu)
2243 		slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
2244 	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
2245 
2246 	SET_NETDEV_DEV(slave_dev, port->ds->dev);
2247 	slave_dev->dev.of_node = port->dn;
2248 	slave_dev->vlan_features = master->vlan_features;
2249 
2250 	p = netdev_priv(slave_dev);
2251 	slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2252 	if (!slave_dev->tstats) {
2253 		free_netdev(slave_dev);
2254 		return -ENOMEM;
2255 	}
2256 
2257 	ret = gro_cells_init(&p->gcells, slave_dev);
2258 	if (ret)
2259 		goto out_free;
2260 
2261 	p->dp = port;
2262 	INIT_LIST_HEAD(&p->mall_tc_list);
2263 	port->slave = slave_dev;
2264 	dsa_slave_setup_tagger(slave_dev);
2265 
2266 	netif_carrier_off(slave_dev);
2267 
2268 	ret = dsa_slave_phy_setup(slave_dev);
2269 	if (ret) {
2270 		netdev_err(slave_dev,
2271 			   "error %d setting up PHY for tree %d, switch %d, port %d\n",
2272 			   ret, ds->dst->index, ds->index, port->index);
2273 		goto out_gcells;
2274 	}
2275 
2276 	rtnl_lock();
2277 
2278 	ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
2279 	if (ret && ret != -EOPNOTSUPP)
2280 		dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
2281 			 ret, ETH_DATA_LEN, port->index);
2282 
2283 	ret = register_netdevice(slave_dev);
2284 	if (ret) {
2285 		netdev_err(master, "error %d registering interface %s\n",
2286 			   ret, slave_dev->name);
2287 		rtnl_unlock();
2288 		goto out_phy;
2289 	}
2290 
2291 	if (IS_ENABLED(CONFIG_DCB)) {
2292 		ret = dsa_slave_dcbnl_init(slave_dev);
2293 		if (ret) {
2294 			netdev_err(slave_dev,
2295 				   "failed to initialize DCB: %pe\n",
2296 				   ERR_PTR(ret));
2297 			rtnl_unlock();
2298 			goto out_unregister;
2299 		}
2300 	}
2301 
2302 	ret = netdev_upper_dev_link(master, slave_dev, NULL);
2303 
2304 	rtnl_unlock();
2305 
2306 	if (ret)
2307 		goto out_unregister;
2308 
2309 	return 0;
2310 
2311 out_unregister:
2312 	unregister_netdev(slave_dev);
2313 out_phy:
2314 	rtnl_lock();
2315 	phylink_disconnect_phy(p->dp->pl);
2316 	rtnl_unlock();
2317 	phylink_destroy(p->dp->pl);
2318 out_gcells:
2319 	gro_cells_destroy(&p->gcells);
2320 out_free:
2321 	free_percpu(slave_dev->tstats);
2322 	free_netdev(slave_dev);
2323 	port->slave = NULL;
2324 	return ret;
2325 }
2326 
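/* Tear down a slave net_device, undoing the steps of dsa_slave_create()
 * in reverse order.
 */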
2327 void dsa_slave_destroy(struct net_device *slave_dev)
2328 {
2329 	struct net_device *master = dsa_slave_to_master(slave_dev);
2330 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2331 	struct dsa_slave_priv *p = netdev_priv(slave_dev);
2332 
2333 	netif_carrier_off(slave_dev);
2334 	rtnl_lock();
2335 	netdev_upper_dev_unlink(master, slave_dev);
2336 	unregister_netdevice(slave_dev);
2337 	phylink_disconnect_phy(dp->pl);
2338 	rtnl_unlock();
2339 
2340 	phylink_destroy(dp->pl);
2341 	gro_cells_destroy(&p->gcells);
2342 	free_percpu(slave_dev->tstats);
2343 	free_netdev(slave_dev);
2344 }
2345 
2346 bool dsa_slave_dev_check(const struct net_device *dev)
2347 {
2348 	return dev->netdev_ops == &dsa_slave_netdev_ops;
2349 }
2350 EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
2351 
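/* Handle this port being linked to or unlinked from a bridge, LAG or HSR
 * upper device. An -EOPNOTSUPP from the driver is not fatal: the upper is
 * left unoffloaded and the error is reported through extack instead.
 */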
2352 static int dsa_slave_changeupper(struct net_device *dev,
2353 				 struct netdev_notifier_changeupper_info *info)
2354 {
2355 	struct dsa_port *dp = dsa_slave_to_port(dev);
2356 	struct netlink_ext_ack *extack;
2357 	int err = NOTIFY_DONE;
2358 
2359 	extack = netdev_notifier_info_to_extack(&info->info);
2360 
2361 	if (netif_is_bridge_master(info->upper_dev)) {
2362 		if (info->linking) {
2363 			err = dsa_port_bridge_join(dp, info->upper_dev, extack);
2364 			if (!err)
2365 				dsa_bridge_mtu_normalization(dp);
2366 			if (err == -EOPNOTSUPP) {
2367 				NL_SET_ERR_MSG_MOD(extack,
2368 						   "Offloading not supported");
2369 				err = 0;
2370 			}
2371 			err = notifier_from_errno(err);
2372 		} else {
2373 			dsa_port_bridge_leave(dp, info->upper_dev);
2374 			err = NOTIFY_OK;
2375 		}
2376 	} else if (netif_is_lag_master(info->upper_dev)) {
2377 		if (info->linking) {
2378 			err = dsa_port_lag_join(dp, info->upper_dev,
2379 						info->upper_info, extack);
2380 			if (err == -EOPNOTSUPP) {
2381 				NL_SET_ERR_MSG_MOD(extack,
2382 						   "Offloading not supported");
2383 				err = 0;
2384 			}
2385 			err = notifier_from_errno(err);
2386 		} else {
2387 			dsa_port_lag_leave(dp, info->upper_dev);
2388 			err = NOTIFY_OK;
2389 		}
2390 	} else if (is_hsr_master(info->upper_dev)) {
2391 		if (info->linking) {
2392 			err = dsa_port_hsr_join(dp, info->upper_dev);
2393 			if (err == -EOPNOTSUPP) {
2394 				NL_SET_ERR_MSG_MOD(extack,
2395 						   "Offloading not supported");
2396 				err = 0;
2397 			}
2398 			err = notifier_from_errno(err);
2399 		} else {
2400 			dsa_port_hsr_leave(dp, info->upper_dev);
2401 			err = NOTIFY_OK;
2402 		}
2403 	}
2404 
2405 	return err;
2406 }
2407 
2408 static int dsa_slave_prechangeupper(struct net_device *dev,
2409 				    struct netdev_notifier_changeupper_info *info)
2410 {
2411 	struct dsa_port *dp = dsa_slave_to_port(dev);
2412 
2413 	if (netif_is_bridge_master(info->upper_dev) && !info->linking)
2414 		dsa_port_pre_bridge_leave(dp, info->upper_dev);
2415 	else if (netif_is_lag_master(info->upper_dev) && !info->linking)
2416 		dsa_port_pre_lag_leave(dp, info->upper_dev);
2417 	/* dsa_port_pre_hsr_leave is not yet necessary since hsr cannot be
2418 	 * meaningfully enslaved to a bridge yet
2419 	 */
2420 
2421 	return NOTIFY_DONE;
2422 }
2423 
2424 static int
2425 dsa_slave_lag_changeupper(struct net_device *dev,
2426 			  struct netdev_notifier_changeupper_info *info)
2427 {
2428 	struct net_device *lower;
2429 	struct list_head *iter;
2430 	int err = NOTIFY_DONE;
2431 	struct dsa_port *dp;
2432 
2433 	netdev_for_each_lower_dev(dev, lower, iter) {
2434 		if (!dsa_slave_dev_check(lower))
2435 			continue;
2436 
2437 		dp = dsa_slave_to_port(lower);
2438 		if (!dp->lag)
2439 			/* Software LAG */
2440 			continue;
2441 
2442 		err = dsa_slave_changeupper(lower, info);
2443 		if (notifier_to_errno(err))
2444 			break;
2445 	}
2446 
2447 	return err;
2448 }
2449 
2450 /* Same as dsa_slave_lag_changeupper() except that it calls
2451  * dsa_slave_prechangeupper()
2452  */
2453 static int
2454 dsa_slave_lag_prechangeupper(struct net_device *dev,
2455 			     struct netdev_notifier_changeupper_info *info)
2456 {
2457 	struct net_device *lower;
2458 	struct list_head *iter;
2459 	int err = NOTIFY_DONE;
2460 	struct dsa_port *dp;
2461 
2462 	netdev_for_each_lower_dev(dev, lower, iter) {
2463 		if (!dsa_slave_dev_check(lower))
2464 			continue;
2465 
2466 		dp = dsa_slave_to_port(lower);
2467 		if (!dp->lag)
2468 			/* Software LAG */
2469 			continue;
2470 
2471 		err = dsa_slave_prechangeupper(lower, info);
2472 		if (notifier_to_errno(err))
2473 			break;
2474 	}
2475 
2476 	return err;
2477 }
2478 
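/* When the underlying user port is a member of a VLAN-aware bridge, deny
 * placing a VLAN upper of that port under a bridge, as the upper's VID
 * would otherwise compete with the bridge VLANs configured on the port.
 */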
2479 static int
2480 dsa_prevent_bridging_8021q_upper(struct net_device *dev,
2481 				 struct netdev_notifier_changeupper_info *info)
2482 {
2483 	struct netlink_ext_ack *ext_ack;
2484 	struct net_device *slave, *br;
2485 	struct dsa_port *dp;
2486 
2487 	ext_ack = netdev_notifier_info_to_extack(&info->info);
2488 
2489 	if (!is_vlan_dev(dev))
2490 		return NOTIFY_DONE;
2491 
2492 	slave = vlan_dev_real_dev(dev);
2493 	if (!dsa_slave_dev_check(slave))
2494 		return NOTIFY_DONE;
2495 
2496 	dp = dsa_slave_to_port(slave);
2497 	br = dsa_port_bridge_dev_get(dp);
2498 	if (!br)
2499 		return NOTIFY_DONE;
2500 
2501 	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
2502 	if (br_vlan_enabled(br) &&
2503 	    netif_is_bridge_master(info->upper_dev) && info->linking) {
2504 		NL_SET_ERR_MSG_MOD(ext_ack,
2505 				   "Cannot enslave VLAN device into VLAN aware bridge");
2506 		return notifier_from_errno(-EINVAL);
2507 	}
2508 
2509 	return NOTIFY_DONE;
2510 }
2511 
2512 static int
2513 dsa_slave_check_8021q_upper(struct net_device *dev,
2514 			    struct netdev_notifier_changeupper_info *info)
2515 {
2516 	struct dsa_port *dp = dsa_slave_to_port(dev);
2517 	struct net_device *br = dsa_port_bridge_dev_get(dp);
2518 	struct bridge_vlan_info br_info;
2519 	struct netlink_ext_ack *extack;
2520 	int err = NOTIFY_DONE;
2521 	u16 vid;
2522 
2523 	if (!br || !br_vlan_enabled(br))
2524 		return NOTIFY_DONE;
2525 
2526 	extack = netdev_notifier_info_to_extack(&info->info);
2527 	vid = vlan_dev_vlan_id(info->upper_dev);
2528 
2529 	/* br_vlan_get_info() returns -EINVAL or -ENOENT if the device,
2530 	 * respectively the VID, is not found. A return of 0 means the VID
2531 	 * exists on the bridge, which counts as a failure for us here.
2532 	 */
2533 	err = br_vlan_get_info(br, vid, &br_info);
2534 	if (err == 0) {
2535 		NL_SET_ERR_MSG_MOD(extack,
2536 				   "This VLAN is already configured by the bridge");
2537 		return notifier_from_errno(-EBUSY);
2538 	}
2539 
2540 	return NOTIFY_DONE;
2541 }
2542 
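/* Sanity checks run before a topology change is acted upon: let the
 * switch driver veto the new upper, and reject 8021q uppers whose VID
 * clashes with an existing bridge VLAN.
 */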
2543 static int
2544 dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
2545 				      struct netdev_notifier_changeupper_info *info)
2546 {
2547 	struct dsa_switch *ds;
2548 	struct dsa_port *dp;
2549 	int err;
2550 
2551 	if (!dsa_slave_dev_check(dev))
2552 		return dsa_prevent_bridging_8021q_upper(dev, info);
2553 
2554 	dp = dsa_slave_to_port(dev);
2555 	ds = dp->ds;
2556 
2557 	if (ds->ops->port_prechangeupper) {
2558 		err = ds->ops->port_prechangeupper(ds, dp->index, info);
2559 		if (err)
2560 			return notifier_from_errno(err);
2561 	}
2562 
2563 	if (is_vlan_dev(info->upper_dev))
2564 		return dsa_slave_check_8021q_upper(dev, info);
2565 
2566 	return NOTIFY_DONE;
2567 }
2568 
2569 static int dsa_slave_netdevice_event(struct notifier_block *nb,
2570 				     unsigned long event, void *ptr)
2571 {
2572 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2573 
2574 	switch (event) {
2575 	case NETDEV_PRECHANGEUPPER: {
2576 		struct netdev_notifier_changeupper_info *info = ptr;
2577 		int err;
2578 
2579 		err = dsa_slave_prechangeupper_sanity_check(dev, info);
2580 		if (err != NOTIFY_DONE)
2581 			return err;
2582 
2583 		if (dsa_slave_dev_check(dev))
2584 			return dsa_slave_prechangeupper(dev, ptr);
2585 
2586 		if (netif_is_lag_master(dev))
2587 			return dsa_slave_lag_prechangeupper(dev, ptr);
2588 
2589 		break;
2590 	}
2591 	case NETDEV_CHANGEUPPER:
2592 		if (dsa_slave_dev_check(dev))
2593 			return dsa_slave_changeupper(dev, ptr);
2594 
2595 		if (netif_is_lag_master(dev))
2596 			return dsa_slave_lag_changeupper(dev, ptr);
2597 
2598 		break;
2599 	case NETDEV_CHANGELOWERSTATE: {
2600 		struct netdev_notifier_changelowerstate_info *info = ptr;
2601 		struct dsa_port *dp;
2602 		int err;
2603 
2604 		if (!dsa_slave_dev_check(dev))
2605 			break;
2606 
2607 		dp = dsa_slave_to_port(dev);
2608 
2609 		err = dsa_port_lag_change(dp, info->lower_state_info);
2610 		return notifier_from_errno(err);
2611 	}
2612 	case NETDEV_CHANGE:
2613 	case NETDEV_UP: {
2614 		/* Track the state of the master port.
2615 		 * A DSA driver may require the master port (and indirectly
2616 		 * the tagger) to be available for some special operations.
2617 		 */
2618 		if (netdev_uses_dsa(dev)) {
2619 			struct dsa_port *cpu_dp = dev->dsa_ptr;
2620 			struct dsa_switch_tree *dst = cpu_dp->ds->dst;
2621 
2622 			/* Track when the master port is UP */
2623 			dsa_tree_master_oper_state_change(dst, dev,
2624 							  netif_oper_up(dev));
2625 
2626 			/* Track when the master port is ready and can accept
2627 			 * packets.
2628 			 * The NETDEV_UP event is not enough to flag a port as
2629 			 * ready. We also have to wait for linkwatch_do_dev to
2630 			 * call dev_activate and emit a NETDEV_CHANGE event.
2631 			 * We check that a master port is ready by checking that
2632 			 * the dev has a qdisc assigned which is not the noop qdisc.
2633 			 */
2634 			dsa_tree_master_admin_state_change(dst, dev,
2635 							   !qdisc_tx_is_noop(dev));
2636 
2637 			return NOTIFY_OK;
2638 		}
2639 
2640 		return NOTIFY_DONE;
2641 	}
2642 	case NETDEV_GOING_DOWN: {
2643 		struct dsa_port *dp, *cpu_dp;
2644 		struct dsa_switch_tree *dst;
2645 		LIST_HEAD(close_list);
2646 
2647 		if (!netdev_uses_dsa(dev))
2648 			return NOTIFY_DONE;
2649 
2650 		cpu_dp = dev->dsa_ptr;
2651 		dst = cpu_dp->ds->dst;
2652 
2653 		dsa_tree_master_admin_state_change(dst, dev, false);
2654 
2655 		list_for_each_entry(dp, &dst->ports, list) {
2656 			if (!dsa_port_is_user(dp))
2657 				continue;
2658 
2659 			list_add(&dp->slave->close_list, &close_list);
2660 		}
2661 
2662 		dev_close_many(&close_list, true);
2663 
2664 		return NOTIFY_OK;
2665 	}
2666 	default:
2667 		break;
2668 	}
2669 
2670 	return NOTIFY_DONE;
2671 }
2672 
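/* Tell the bridge that this FDB entry is now offloaded, so it shows up
 * with the offload flag in the software FDB dump.
 */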
2673 static void
2674 dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
2675 {
2676 	struct switchdev_notifier_fdb_info info = {};
2677 
2678 	info.addr = switchdev_work->addr;
2679 	info.vid = switchdev_work->vid;
2680 	info.offloaded = true;
2681 	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
2682 				 switchdev_work->orig_dev, &info.info, NULL);
2683 }
2684 
2685 static void dsa_slave_switchdev_event_work(struct work_struct *work)
2686 {
2687 	struct dsa_switchdev_event_work *switchdev_work =
2688 		container_of(work, struct dsa_switchdev_event_work, work);
2689 	const unsigned char *addr = switchdev_work->addr;
2690 	struct net_device *dev = switchdev_work->dev;
2691 	u16 vid = switchdev_work->vid;
2692 	struct dsa_switch *ds;
2693 	struct dsa_port *dp;
2694 	int err;
2695 
2696 	dp = dsa_slave_to_port(dev);
2697 	ds = dp->ds;
2698 
2699 	switch (switchdev_work->event) {
2700 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2701 		if (switchdev_work->host_addr)
2702 			err = dsa_port_bridge_host_fdb_add(dp, addr, vid);
2703 		else if (dp->lag)
2704 			err = dsa_port_lag_fdb_add(dp, addr, vid);
2705 		else
2706 			err = dsa_port_fdb_add(dp, addr, vid);
2707 		if (err) {
2708 			dev_err(ds->dev,
2709 				"port %d failed to add %pM vid %d to fdb: %d\n",
2710 				dp->index, addr, vid, err);
2711 			break;
2712 		}
2713 		dsa_fdb_offload_notify(switchdev_work);
2714 		break;
2715 
2716 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2717 		if (switchdev_work->host_addr)
2718 			err = dsa_port_bridge_host_fdb_del(dp, addr, vid);
2719 		else if (dp->lag)
2720 			err = dsa_port_lag_fdb_del(dp, addr, vid);
2721 		else
2722 			err = dsa_port_fdb_del(dp, addr, vid);
2723 		if (err) {
2724 			dev_err(ds->dev,
2725 				"port %d failed to delete %pM vid %d from fdb: %d\n",
2726 				dp->index, addr, vid, err);
2727 		}
2728 
2729 		break;
2730 	}
2731 
2732 	kfree(switchdev_work);
2733 }
2734 
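/* An interface is foreign to @dev if it is neither a bridge (port) that
 * this switch tree offloads nor one of our own ports. FDB entries seen on
 * foreign interfaces bridged with us are installed as host addresses,
 * trapped towards the CPU.
 */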
2735 static bool dsa_foreign_dev_check(const struct net_device *dev,
2736 				  const struct net_device *foreign_dev)
2737 {
2738 	const struct dsa_port *dp = dsa_slave_to_port(dev);
2739 	struct dsa_switch_tree *dst = dp->ds->dst;
2740 
2741 	if (netif_is_bridge_master(foreign_dev))
2742 		return !dsa_tree_offloads_bridge_dev(dst, foreign_dev);
2743 
2744 	if (netif_is_bridge_port(foreign_dev))
2745 		return !dsa_tree_offloads_bridge_port(dst, foreign_dev);
2746 
2747 	/* Everything else is foreign */
2748 	return true;
2749 }
2750 
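/* Schedule deferred work for an FDB add/del notification. This runs in
 * atomic context (under rcu_read_lock), hence the GFP_ATOMIC allocation
 * and the deferral to a worker.
 */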
2751 static int dsa_slave_fdb_event(struct net_device *dev,
2752 			       struct net_device *orig_dev,
2753 			       unsigned long event, const void *ctx,
2754 			       const struct switchdev_notifier_fdb_info *fdb_info)
2755 {
2756 	struct dsa_switchdev_event_work *switchdev_work;
2757 	struct dsa_port *dp = dsa_slave_to_port(dev);
2758 	bool host_addr = fdb_info->is_local;
2759 	struct dsa_switch *ds = dp->ds;
2760 
2761 	if (ctx && ctx != dp)
2762 		return 0;
2763 
2764 	if (switchdev_fdb_is_dynamically_learned(fdb_info)) {
2765 		if (dsa_port_offloads_bridge_port(dp, orig_dev))
2766 			return 0;
2767 
2768 		/* FDB entries learned by the software bridge or by foreign
2769 		 * bridge ports should be installed as host addresses only if
2770 		 * the driver requests assisted learning.
2771 		 */
2772 		if (!ds->assisted_learning_on_cpu_port)
2773 			return 0;
2774 	}
2775 
2776 	/* Also treat FDB entries on foreign interfaces bridged with us as host
2777 	 * addresses.
2778 	 */
2779 	if (dsa_foreign_dev_check(dev, orig_dev))
2780 		host_addr = true;
2781 
2782 	/* Check early that we're not doing work in vain.
2783 	 * Host addresses on LAG ports still require regular FDB ops,
2784 	 * since the CPU port isn't in a LAG.
2785 	 */
2786 	if (dp->lag && !host_addr) {
2787 		if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del)
2788 			return -EOPNOTSUPP;
2789 	} else {
2790 		if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
2791 			return -EOPNOTSUPP;
2792 	}
2793 
2794 	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
2795 	if (!switchdev_work)
2796 		return -ENOMEM;
2797 
2798 	netdev_dbg(dev, "%s FDB entry towards %s, addr %pM vid %d%s\n",
2799 		   event == SWITCHDEV_FDB_ADD_TO_DEVICE ? "Adding" : "Deleting",
2800 		   orig_dev->name, fdb_info->addr, fdb_info->vid,
2801 		   host_addr ? " as host address" : "");
2802 
2803 	INIT_WORK(&switchdev_work->work, dsa_slave_switchdev_event_work);
2804 	switchdev_work->event = event;
2805 	switchdev_work->dev = dev;
2806 	switchdev_work->orig_dev = orig_dev;
2807 
2808 	ether_addr_copy(switchdev_work->addr, fdb_info->addr);
2809 	switchdev_work->vid = fdb_info->vid;
2810 	switchdev_work->host_addr = host_addr;
2811 
2812 	dsa_schedule_work(&switchdev_work->work);
2813 
2814 	return 0;
2815 }
2816 
2817 /* Called under rcu_read_lock() */
2818 static int dsa_slave_switchdev_event(struct notifier_block *unused,
2819 				     unsigned long event, void *ptr)
2820 {
2821 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2822 	int err;
2823 
2824 	switch (event) {
2825 	case SWITCHDEV_PORT_ATTR_SET:
2826 		err = switchdev_handle_port_attr_set(dev, ptr,
2827 						     dsa_slave_dev_check,
2828 						     dsa_slave_port_attr_set);
2829 		return notifier_from_errno(err);
2830 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2831 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2832 		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
2833 							   dsa_slave_dev_check,
2834 							   dsa_foreign_dev_check,
2835 							   dsa_slave_fdb_event);
2836 		return notifier_from_errno(err);
2837 	default:
2838 		return NOTIFY_DONE;
2839 	}
2840 
2841 	return NOTIFY_OK;
2842 }
2843 
2844 static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
2845 					      unsigned long event, void *ptr)
2846 {
2847 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2848 	int err;
2849 
2850 	switch (event) {
2851 	case SWITCHDEV_PORT_OBJ_ADD:
2852 		err = switchdev_handle_port_obj_add_foreign(dev, ptr,
2853 							    dsa_slave_dev_check,
2854 							    dsa_foreign_dev_check,
2855 							    dsa_slave_port_obj_add);
2856 		return notifier_from_errno(err);
2857 	case SWITCHDEV_PORT_OBJ_DEL:
2858 		err = switchdev_handle_port_obj_del_foreign(dev, ptr,
2859 							    dsa_slave_dev_check,
2860 							    dsa_foreign_dev_check,
2861 							    dsa_slave_port_obj_del);
2862 		return notifier_from_errno(err);
2863 	case SWITCHDEV_PORT_ATTR_SET:
2864 		err = switchdev_handle_port_attr_set(dev, ptr,
2865 						     dsa_slave_dev_check,
2866 						     dsa_slave_port_attr_set);
2867 		return notifier_from_errno(err);
2868 	}
2869 
2870 	return NOTIFY_DONE;
2871 }
2872 
2873 static struct notifier_block dsa_slave_nb __read_mostly = {
2874 	.notifier_call  = dsa_slave_netdevice_event,
2875 };
2876 
2877 struct notifier_block dsa_slave_switchdev_notifier = {
2878 	.notifier_call = dsa_slave_switchdev_event,
2879 };
2880 
2881 struct notifier_block dsa_slave_switchdev_blocking_notifier = {
2882 	.notifier_call = dsa_slave_switchdev_blocking_event,
2883 };
2884 
2885 int dsa_slave_register_notifier(void)
2886 {
2887 	struct notifier_block *nb;
2888 	int err;
2889 
2890 	err = register_netdevice_notifier(&dsa_slave_nb);
2891 	if (err)
2892 		return err;
2893 
2894 	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
2895 	if (err)
2896 		goto err_switchdev_nb;
2897 
2898 	nb = &dsa_slave_switchdev_blocking_notifier;
2899 	err = register_switchdev_blocking_notifier(nb);
2900 	if (err)
2901 		goto err_switchdev_blocking_nb;
2902 
2903 	return 0;
2904 
2905 err_switchdev_blocking_nb:
2906 	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
2907 err_switchdev_nb:
2908 	unregister_netdevice_notifier(&dsa_slave_nb);
2909 	return err;
2910 }
2911 
2912 void dsa_slave_unregister_notifier(void)
2913 {
2914 	struct notifier_block *nb;
2915 	int err;
2916 
2917 	nb = &dsa_slave_switchdev_blocking_notifier;
2918 	err = unregister_switchdev_blocking_notifier(nb);
2919 	if (err)
2920 		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
2921 
2922 	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
2923 	if (err)
2924 		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
2925 
2926 	err = unregister_netdevice_notifier(&dsa_slave_nb);
2927 	if (err)
2928 		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
2929 }
2930