xref: /openbmc/linux/net/dsa/slave.c (revision bf459478)
1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   * net/dsa/slave.c - Slave device handling
4   * Copyright (c) 2008-2009 Marvell Semiconductor
5   */
6  
7  #include <linux/list.h>
8  #include <linux/etherdevice.h>
9  #include <linux/netdevice.h>
10  #include <linux/phy.h>
11  #include <linux/phy_fixed.h>
12  #include <linux/phylink.h>
13  #include <linux/of_net.h>
14  #include <linux/of_mdio.h>
15  #include <linux/mdio.h>
16  #include <net/rtnetlink.h>
17  #include <net/pkt_cls.h>
18  #include <net/tc_act/tc_mirred.h>
19  #include <linux/if_bridge.h>
20  #include <linux/if_hsr.h>
21  #include <linux/netpoll.h>
22  #include <linux/ptp_classify.h>
23  
24  #include "dsa_priv.h"
25  
26  /* slave mii_bus handling ***************************************************/
27  static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
28  {
29  	struct dsa_switch *ds = bus->priv;
30  
31  	if (ds->phys_mii_mask & (1 << addr))
32  		return ds->ops->phy_read(ds, addr, reg);
33  
34  	return 0xffff;
35  }
36  
37  static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
38  {
39  	struct dsa_switch *ds = bus->priv;
40  
41  	if (ds->phys_mii_mask & (1 << addr))
42  		return ds->ops->phy_write(ds, addr, reg, val);
43  
44  	return 0;
45  }
46  
47  void dsa_slave_mii_bus_init(struct dsa_switch *ds)
48  {
49  	ds->slave_mii_bus->priv = (void *)ds;
50  	ds->slave_mii_bus->name = "dsa slave smi";
51  	ds->slave_mii_bus->read = dsa_slave_phy_read;
52  	ds->slave_mii_bus->write = dsa_slave_phy_write;
53  	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
54  		 ds->dst->index, ds->index);
55  	ds->slave_mii_bus->parent = ds->dev;
56  	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
57  }
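/* Note: the bus above is a thin proxy. Reads and writes are forwarded to
 * the switch driver's phy_read()/phy_write() only for addresses set in
 * ds->phys_mii_mask; everything else reads back as 0xffff ("no device").
 * Because phy_mask is the complement of phys_mii_mask, the MDIO core will
 * not even probe the unused addresses. For instance (illustrative values,
 * not from any particular driver), a switch exposing internal PHYs at
 * addresses 0-4 would set phys_mii_mask = 0x1f, so only those five
 * addresses ever reach the driver.
 */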
58  
59  
60  /* slave device handling ****************************************************/
61  static int dsa_slave_get_iflink(const struct net_device *dev)
62  {
63  	return dsa_slave_to_master(dev)->ifindex;
64  }
65  
66  static int dsa_slave_open(struct net_device *dev)
67  {
68  	struct net_device *master = dsa_slave_to_master(dev);
69  	struct dsa_port *dp = dsa_slave_to_port(dev);
70  	int err;
71  
72  	err = dev_open(master, NULL);
73  	if (err < 0) {
74  		netdev_err(dev, "failed to open master %s\n", master->name);
75  		goto out;
76  	}
77  
78  	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
79  		err = dev_uc_add(master, dev->dev_addr);
80  		if (err < 0)
81  			goto out;
82  	}
83  
84  	if (dev->flags & IFF_ALLMULTI) {
85  		err = dev_set_allmulti(master, 1);
86  		if (err < 0)
87  			goto del_unicast;
88  	}
89  	if (dev->flags & IFF_PROMISC) {
90  		err = dev_set_promiscuity(master, 1);
91  		if (err < 0)
92  			goto clear_allmulti;
93  	}
94  
95  	err = dsa_port_enable_rt(dp, dev->phydev);
96  	if (err)
97  		goto clear_promisc;
98  
99  	return 0;
100  
101  clear_promisc:
102  	if (dev->flags & IFF_PROMISC)
103  		dev_set_promiscuity(master, -1);
104  clear_allmulti:
105  	if (dev->flags & IFF_ALLMULTI)
106  		dev_set_allmulti(master, -1);
107  del_unicast:
108  	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
109  		dev_uc_del(master, dev->dev_addr);
110  out:
111  	return err;
112  }
113  
114  static int dsa_slave_close(struct net_device *dev)
115  {
116  	struct net_device *master = dsa_slave_to_master(dev);
117  	struct dsa_port *dp = dsa_slave_to_port(dev);
118  
119  	dsa_port_disable_rt(dp);
120  
121  	dev_mc_unsync(master, dev);
122  	dev_uc_unsync(master, dev);
123  	if (dev->flags & IFF_ALLMULTI)
124  		dev_set_allmulti(master, -1);
125  	if (dev->flags & IFF_PROMISC)
126  		dev_set_promiscuity(master, -1);
127  
128  	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
129  		dev_uc_del(master, dev->dev_addr);
130  
131  	return 0;
132  }
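/* dsa_slave_open() and dsa_slave_close() are deliberately symmetric: open
 * brings the master up, mirrors the slave's unicast address, allmulti and
 * promiscuity settings onto it, then enables the switch port; close undoes
 * each step in reverse order, which is also what the error unwind labels
 * in dsa_slave_open() implement.
 */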
133  
134  static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
135  {
136  	struct net_device *master = dsa_slave_to_master(dev);
137  	if (dev->flags & IFF_UP) {
138  		if (change & IFF_ALLMULTI)
139  			dev_set_allmulti(master,
140  					 dev->flags & IFF_ALLMULTI ? 1 : -1);
141  		if (change & IFF_PROMISC)
142  			dev_set_promiscuity(master,
143  					    dev->flags & IFF_PROMISC ? 1 : -1);
144  	}
145  }
146  
147  static void dsa_slave_set_rx_mode(struct net_device *dev)
148  {
149  	struct net_device *master = dsa_slave_to_master(dev);
150  
151  	dev_mc_sync(master, dev);
152  	dev_uc_sync(master, dev);
153  }
154  
155  static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
156  {
157  	struct net_device *master = dsa_slave_to_master(dev);
158  	struct sockaddr *addr = a;
159  	int err;
160  
161  	if (!is_valid_ether_addr(addr->sa_data))
162  		return -EADDRNOTAVAIL;
163  
164  	if (!(dev->flags & IFF_UP))
165  		goto out;
166  
167  	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
168  		err = dev_uc_add(master, addr->sa_data);
169  		if (err < 0)
170  			return err;
171  	}
172  
173  	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
174  		dev_uc_del(master, dev->dev_addr);
175  
176  out:
177  	ether_addr_copy(dev->dev_addr, addr->sa_data);
178  
179  	return 0;
180  }
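/* The unicast juggling above exists because a DSA slave has no RX path of
 * its own: all frames arrive through the master. If the slave's MAC
 * address differs from the master's, it must be added to the master's
 * unicast filter (dev_uc_add) so the master does not drop those frames,
 * and the old address is removed only once the new one is installed.
 */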
181  
182  struct dsa_slave_dump_ctx {
183  	struct net_device *dev;
184  	struct sk_buff *skb;
185  	struct netlink_callback *cb;
186  	int idx;
187  };
188  
189  static int
190  dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
191  			   bool is_static, void *data)
192  {
193  	struct dsa_slave_dump_ctx *dump = data;
194  	u32 portid = NETLINK_CB(dump->cb->skb).portid;
195  	u32 seq = dump->cb->nlh->nlmsg_seq;
196  	struct nlmsghdr *nlh;
197  	struct ndmsg *ndm;
198  
199  	if (dump->idx < dump->cb->args[2])
200  		goto skip;
201  
202  	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
203  			sizeof(*ndm), NLM_F_MULTI);
204  	if (!nlh)
205  		return -EMSGSIZE;
206  
207  	ndm = nlmsg_data(nlh);
208  	ndm->ndm_family  = AF_BRIDGE;
209  	ndm->ndm_pad1    = 0;
210  	ndm->ndm_pad2    = 0;
211  	ndm->ndm_flags   = NTF_SELF;
212  	ndm->ndm_type    = 0;
213  	ndm->ndm_ifindex = dump->dev->ifindex;
214  	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;
215  
216  	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
217  		goto nla_put_failure;
218  
219  	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
220  		goto nla_put_failure;
221  
222  	nlmsg_end(dump->skb, nlh);
223  
224  skip:
225  	dump->idx++;
226  	return 0;
227  
228  nla_put_failure:
229  	nlmsg_cancel(dump->skb, nlh);
230  	return -EMSGSIZE;
231  }
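/* The dump helper above is invoked once per FDB entry by the switch
 * driver. dump->cb->args[2] holds the index where a previous, truncated
 * netlink dump stopped, so entries below it are skipped and only counted;
 * this is what lets 'bridge fdb show' resume across multiple recvmsg()
 * calls without duplicating entries.
 */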
232  
233  static int
234  dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
235  		   struct net_device *dev, struct net_device *filter_dev,
236  		   int *idx)
237  {
238  	struct dsa_port *dp = dsa_slave_to_port(dev);
239  	struct dsa_slave_dump_ctx dump = {
240  		.dev = dev,
241  		.skb = skb,
242  		.cb = cb,
243  		.idx = *idx,
244  	};
245  	int err;
246  
247  	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
248  	*idx = dump.idx;
249  
250  	return err;
251  }
252  
253  static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
254  {
255  	struct dsa_slave_priv *p = netdev_priv(dev);
256  	struct dsa_switch *ds = p->dp->ds;
257  	int port = p->dp->index;
258  
259  	/* Pass through to switch driver if it supports timestamping */
260  	switch (cmd) {
261  	case SIOCGHWTSTAMP:
262  		if (ds->ops->port_hwtstamp_get)
263  			return ds->ops->port_hwtstamp_get(ds, port, ifr);
264  		break;
265  	case SIOCSHWTSTAMP:
266  		if (ds->ops->port_hwtstamp_set)
267  			return ds->ops->port_hwtstamp_set(ds, port, ifr);
268  		break;
269  	}
270  
271  	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
272  }
273  
274  static int dsa_slave_port_attr_set(struct net_device *dev,
275  				   const struct switchdev_attr *attr,
276  				   struct netlink_ext_ack *extack)
277  {
278  	struct dsa_port *dp = dsa_slave_to_port(dev);
279  	int ret;
280  
281  	switch (attr->id) {
282  	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
283  		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
284  			return -EOPNOTSUPP;
285  
286  		ret = dsa_port_set_state(dp, attr->u.stp_state);
287  		break;
288  	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
289  		if (!dsa_port_offloads_bridge(dp, attr->orig_dev))
290  			return -EOPNOTSUPP;
291  
292  		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
293  					      extack);
294  		break;
295  	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
296  		if (!dsa_port_offloads_bridge(dp, attr->orig_dev))
297  			return -EOPNOTSUPP;
298  
299  		ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
300  		break;
301  	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
302  		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
303  			return -EOPNOTSUPP;
304  
305  		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
306  						extack);
307  		break;
308  	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
309  		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
310  			return -EOPNOTSUPP;
311  
312  		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
313  		break;
314  	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
315  		if (!dsa_port_offloads_bridge(dp, attr->orig_dev))
316  			return -EOPNOTSUPP;
317  
318  		ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, extack);
319  		break;
320  	default:
321  		ret = -EOPNOTSUPP;
322  		break;
323  	}
324  
325  	return ret;
326  }
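/* Every case above first validates attr->orig_dev: port-scoped attributes
 * (STP state, bridge port flags) must target a bridge port offloaded by
 * this dp, while bridge-scoped ones (VLAN filtering, ageing time,
 * mrouter) must target the offloaded bridge itself. Returning -EOPNOTSUPP
 * lets switchdev treat the attribute as not offloaded here rather than as
 * a hard failure.
 */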
327  
328  /* Must be called under rcu_read_lock() */
329  static int
330  dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
331  				      const struct switchdev_obj_port_vlan *vlan)
332  {
333  	struct net_device *upper_dev;
334  	struct list_head *iter;
335  
336  	netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
337  		u16 vid;
338  
339  		if (!is_vlan_dev(upper_dev))
340  			continue;
341  
342  		vid = vlan_dev_vlan_id(upper_dev);
343  		if (vid == vlan->vid)
344  			return -EBUSY;
345  	}
346  
347  	return 0;
348  }
349  
350  static int dsa_slave_vlan_add(struct net_device *dev,
351  			      const struct switchdev_obj *obj,
352  			      struct netlink_ext_ack *extack)
353  {
354  	struct net_device *master = dsa_slave_to_master(dev);
355  	struct dsa_port *dp = dsa_slave_to_port(dev);
356  	struct switchdev_obj_port_vlan vlan;
357  	int err;
358  
359  	if (dsa_port_skip_vlan_configuration(dp)) {
360  		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
361  		return 0;
362  	}
363  
364  	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);
365  
366  	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
367  	 * the same VID.
368  	 */
369  	if (br_vlan_enabled(dp->bridge_dev)) {
370  		rcu_read_lock();
371  		err = dsa_slave_vlan_check_for_8021q_uppers(dev, &vlan);
372  		rcu_read_unlock();
373  		if (err) {
374  			NL_SET_ERR_MSG_MOD(extack,
375  					   "Port already has a VLAN upper with this VID");
376  			return err;
377  		}
378  	}
379  
380  	err = dsa_port_vlan_add(dp, &vlan, extack);
381  	if (err)
382  		return err;
383  
384  	/* We need the dedicated CPU port to be a member of the VLAN as well.
385  	 * Even though drivers often handle CPU membership in special ways,
386  	 * it doesn't make sense to program a PVID, so clear this flag.
387  	 */
388  	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;
389  
390  	err = dsa_port_vlan_add(dp->cpu_dp, &vlan, extack);
391  	if (err)
392  		return err;
393  
394  	return vlan_vid_add(master, htons(ETH_P_8021Q), vlan.vid);
395  }
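/* A bridge VLAN thus lands in three places: the user port, the dedicated
 * CPU port (with BRIDGE_VLAN_INFO_PVID cleared, since a PVID makes no
 * sense there), and the master's VLAN filter via vlan_vid_add(), so that
 * tagged traffic for this VID survives the master's RX filtering.
 */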
396  
397  static int dsa_slave_port_obj_add(struct net_device *dev,
398  				  const struct switchdev_obj *obj,
399  				  struct netlink_ext_ack *extack)
400  {
401  	struct dsa_port *dp = dsa_slave_to_port(dev);
402  	int err;
403  
404  	switch (obj->id) {
405  	case SWITCHDEV_OBJ_ID_PORT_MDB:
406  		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
407  			return -EOPNOTSUPP;
408  
409  		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
410  		break;
411  	case SWITCHDEV_OBJ_ID_HOST_MDB:
412  		if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
413  			return -EOPNOTSUPP;
414  
415  		/* DSA can directly translate this to a normal MDB add,
416  		 * but on the CPU port.
417  		 */
418  		err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
419  		break;
420  	case SWITCHDEV_OBJ_ID_PORT_VLAN:
421  		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
422  			return -EOPNOTSUPP;
423  
424  		err = dsa_slave_vlan_add(dev, obj, extack);
425  		break;
426  	case SWITCHDEV_OBJ_ID_MRP:
427  		if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
428  			return -EOPNOTSUPP;
429  
430  		err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
431  		break;
432  	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
433  		if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
434  			return -EOPNOTSUPP;
435  
436  		err = dsa_port_mrp_add_ring_role(dp,
437  						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
438  		break;
439  	default:
440  		err = -EOPNOTSUPP;
441  		break;
442  	}
443  
444  	return err;
445  }
446  
447  static int dsa_slave_vlan_del(struct net_device *dev,
448  			      const struct switchdev_obj *obj)
449  {
450  	struct net_device *master = dsa_slave_to_master(dev);
451  	struct dsa_port *dp = dsa_slave_to_port(dev);
452  	struct switchdev_obj_port_vlan *vlan;
453  	int err;
454  
455  	if (dsa_port_skip_vlan_configuration(dp))
456  		return 0;
457  
458  	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
459  
460  	/* Do not deprogram the CPU port as it may be shared with other user
461  	 * ports which can be members of this VLAN as well.
462  	 */
463  	err = dsa_port_vlan_del(dp, vlan);
464  	if (err)
465  		return err;
466  
467  	vlan_vid_del(master, htons(ETH_P_8021Q), vlan->vid);
468  
469  	return 0;
470  }
471  
472  static int dsa_slave_port_obj_del(struct net_device *dev,
473  				  const struct switchdev_obj *obj)
474  {
475  	struct dsa_port *dp = dsa_slave_to_port(dev);
476  	int err;
477  
478  	switch (obj->id) {
479  	case SWITCHDEV_OBJ_ID_PORT_MDB:
480  		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
481  			return -EOPNOTSUPP;
482  
483  		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
484  		break;
485  	case SWITCHDEV_OBJ_ID_HOST_MDB:
486  		if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
487  			return -EOPNOTSUPP;
488  
489  		/* DSA can directly translate this to a normal MDB del,
490  		 * but on the CPU port.
491  		 */
492  		err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
493  		break;
494  	case SWITCHDEV_OBJ_ID_PORT_VLAN:
495  		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
496  			return -EOPNOTSUPP;
497  
498  		err = dsa_slave_vlan_del(dev, obj);
499  		break;
500  	case SWITCHDEV_OBJ_ID_MRP:
501  		if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
502  			return -EOPNOTSUPP;
503  
504  		err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
505  		break;
506  	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
507  		if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
508  			return -EOPNOTSUPP;
509  
510  		err = dsa_port_mrp_del_ring_role(dp,
511  						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
512  		break;
513  	default:
514  		err = -EOPNOTSUPP;
515  		break;
516  	}
517  
518  	return err;
519  }
520  
521  static int dsa_slave_get_port_parent_id(struct net_device *dev,
522  					struct netdev_phys_item_id *ppid)
523  {
524  	struct dsa_port *dp = dsa_slave_to_port(dev);
525  	struct dsa_switch *ds = dp->ds;
526  	struct dsa_switch_tree *dst = ds->dst;
527  
528  	/* For non-legacy ports, devlink is used and it takes
529  	 * care of the name generation. This ndo implementation
530  	 * should be removed with legacy support.
531  	 */
532  	if (dp->ds->devlink)
533  		return -EOPNOTSUPP;
534  
535  	ppid->id_len = sizeof(dst->index);
536  	memcpy(&ppid->id, &dst->index, ppid->id_len);
537  
538  	return 0;
539  }
540  
541  static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
542  						     struct sk_buff *skb)
543  {
544  #ifdef CONFIG_NET_POLL_CONTROLLER
545  	struct dsa_slave_priv *p = netdev_priv(dev);
546  
547  	return netpoll_send_skb(p->netpoll, skb);
548  #else
549  	BUG();
550  	return NETDEV_TX_OK;
551  #endif
552  }
553  
554  static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
555  				 struct sk_buff *skb)
556  {
557  	struct dsa_switch *ds = p->dp->ds;
558  	struct sk_buff *clone;
559  	unsigned int type;
560  
561  	type = ptp_classify_raw(skb);
562  	if (type == PTP_CLASS_NONE)
563  		return;
564  
565  	if (!ds->ops->port_txtstamp)
566  		return;
567  
568  	clone = skb_clone_sk(skb);
569  	if (!clone)
570  		return;
571  
572  	if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type)) {
573  		DSA_SKB_CB(skb)->clone = clone;
574  		return;
575  	}
576  
577  	kfree_skb(clone);
578  }
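/* TX timestamping flow: the skb is classified as PTP, cloned with
 * skb_clone_sk() so the timestamp can be looped back to the sending
 * socket, and the clone is offered to the driver's port_txtstamp().
 * A driver that returns true keeps ownership of the clone (stashed in
 * DSA_SKB_CB) and completes the timestamp asynchronously; otherwise the
 * clone is freed here and no timestamp is delivered.
 */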
579  
580  netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
581  {
582  	/* SKBs for netpoll still need to be mangled with the protocol-specific
583  	 * tag to be successfully transmitted.
584  	 */
585  	if (unlikely(netpoll_tx_running(dev)))
586  		return dsa_slave_netpoll_send_skb(dev, skb);
587  
588  	/* Queue the SKB for transmission on the parent interface, but
589  	 * do not modify its EtherType
590  	 */
591  	skb->dev = dsa_slave_to_master(dev);
592  	dev_queue_xmit(skb);
593  
594  	return NETDEV_TX_OK;
595  }
596  EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
597  
598  static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
599  {
600  	int needed_headroom = dev->needed_headroom;
601  	int needed_tailroom = dev->needed_tailroom;
602  
603  	/* For tail taggers, we need to pad short frames ourselves, to ensure
604  	 * that the tail tag does not fail at its role of being at the end of
605  	 * the packet, once the master interface pads the frame. Account for
606  	 * that pad length here, and pad later.
607  	 */
608  	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
609  		needed_tailroom += ETH_ZLEN - skb->len;
610  	/* skb_headroom() returns unsigned int, so compute the deltas as
611  	 * signed ints and clamp them at zero
612  	 */
611  	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
612  	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);
613  
614  	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
615  		/* No reallocation needed, yay! */
616  		return 0;
617  
618  	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
619  				GFP_ATOMIC);
620  }
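/* Worked example (illustrative): a tail tagger with 4 bytes of overhead
 * transmitting a 40-byte frame needs 4 + (ETH_ZLEN - 40) = 24 bytes of
 * tailroom, so that after eth_skb_pad() the tag still sits at the very
 * end of the frame rather than inside the master's padding.
 */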
621  
622  static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
623  {
624  	struct dsa_slave_priv *p = netdev_priv(dev);
625  	struct sk_buff *nskb;
626  
627  	dev_sw_netstats_tx_add(dev, 1, skb->len);
628  
629  	DSA_SKB_CB(skb)->clone = NULL;
630  
631  	/* Identify PTP protocol packets, clone them, and pass them to the
632  	 * switch driver
633  	 */
634  	dsa_skb_tx_timestamp(p, skb);
635  
636  	if (dsa_realloc_skb(skb, dev)) {
637  		dev_kfree_skb_any(skb);
638  		return NETDEV_TX_OK;
639  	}
640  
641  	/* needed_tailroom should still be 'warm' in the cache line from
642  	 * dsa_realloc_skb(), which has also ensured that padding is safe.
643  	 */
644  	if (dev->needed_tailroom)
645  		eth_skb_pad(skb);
646  
647  	/* Transmit function may have to reallocate the original SKB,
648  	 * in which case it must have freed it. Only free it here on error.
649  	 */
650  	nskb = p->xmit(skb, dev);
651  	if (!nskb) {
652  		kfree_skb(skb);
653  		return NETDEV_TX_OK;
654  	}
655  
656  	return dsa_enqueue_skb(nskb, dev);
657  }
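/* Slave TX in order: bump the software counters, hand PTP packets to the
 * driver for timestamping, make sure the skb has room for the switch tag
 * (reallocating if not), pad tail-tagged frames to the minimum Ethernet
 * length, let the tagger's xmit() insert the tag, and finally requeue the
 * tagged skb on the master interface via dsa_enqueue_skb().
 */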
658  
659  /* ethtool operations *******************************************************/
660  
661  static void dsa_slave_get_drvinfo(struct net_device *dev,
662  				  struct ethtool_drvinfo *drvinfo)
663  {
664  	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
665  	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
666  	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
667  }
668  
669  static int dsa_slave_get_regs_len(struct net_device *dev)
670  {
671  	struct dsa_port *dp = dsa_slave_to_port(dev);
672  	struct dsa_switch *ds = dp->ds;
673  
674  	if (ds->ops->get_regs_len)
675  		return ds->ops->get_regs_len(ds, dp->index);
676  
677  	return -EOPNOTSUPP;
678  }
679  
680  static void
681  dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
682  {
683  	struct dsa_port *dp = dsa_slave_to_port(dev);
684  	struct dsa_switch *ds = dp->ds;
685  
686  	if (ds->ops->get_regs)
687  		ds->ops->get_regs(ds, dp->index, regs, _p);
688  }
689  
690  static int dsa_slave_nway_reset(struct net_device *dev)
691  {
692  	struct dsa_port *dp = dsa_slave_to_port(dev);
693  
694  	return phylink_ethtool_nway_reset(dp->pl);
695  }
696  
697  static int dsa_slave_get_eeprom_len(struct net_device *dev)
698  {
699  	struct dsa_port *dp = dsa_slave_to_port(dev);
700  	struct dsa_switch *ds = dp->ds;
701  
702  	if (ds->cd && ds->cd->eeprom_len)
703  		return ds->cd->eeprom_len;
704  
705  	if (ds->ops->get_eeprom_len)
706  		return ds->ops->get_eeprom_len(ds);
707  
708  	return 0;
709  }
710  
711  static int dsa_slave_get_eeprom(struct net_device *dev,
712  				struct ethtool_eeprom *eeprom, u8 *data)
713  {
714  	struct dsa_port *dp = dsa_slave_to_port(dev);
715  	struct dsa_switch *ds = dp->ds;
716  
717  	if (ds->ops->get_eeprom)
718  		return ds->ops->get_eeprom(ds, eeprom, data);
719  
720  	return -EOPNOTSUPP;
721  }
722  
723  static int dsa_slave_set_eeprom(struct net_device *dev,
724  				struct ethtool_eeprom *eeprom, u8 *data)
725  {
726  	struct dsa_port *dp = dsa_slave_to_port(dev);
727  	struct dsa_switch *ds = dp->ds;
728  
729  	if (ds->ops->set_eeprom)
730  		return ds->ops->set_eeprom(ds, eeprom, data);
731  
732  	return -EOPNOTSUPP;
733  }
734  
735  static void dsa_slave_get_strings(struct net_device *dev,
736  				  uint32_t stringset, uint8_t *data)
737  {
738  	struct dsa_port *dp = dsa_slave_to_port(dev);
739  	struct dsa_switch *ds = dp->ds;
740  
741  	if (stringset == ETH_SS_STATS) {
742  		int len = ETH_GSTRING_LEN;
743  
744  		strncpy(data, "tx_packets", len);
745  		strncpy(data + len, "tx_bytes", len);
746  		strncpy(data + 2 * len, "rx_packets", len);
747  		strncpy(data + 3 * len, "rx_bytes", len);
748  		if (ds->ops->get_strings)
749  			ds->ops->get_strings(ds, dp->index, stringset,
750  					     data + 4 * len);
751  	}
752  }
753  
754  static void dsa_slave_get_ethtool_stats(struct net_device *dev,
755  					struct ethtool_stats *stats,
756  					uint64_t *data)
757  {
758  	struct dsa_port *dp = dsa_slave_to_port(dev);
759  	struct dsa_switch *ds = dp->ds;
760  	struct pcpu_sw_netstats *s;
761  	unsigned int start;
762  	int i;
763  
764  	for_each_possible_cpu(i) {
765  		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
766  
767  		s = per_cpu_ptr(dev->tstats, i);
768  		do {
769  			start = u64_stats_fetch_begin_irq(&s->syncp);
770  			tx_packets = s->tx_packets;
771  			tx_bytes = s->tx_bytes;
772  			rx_packets = s->rx_packets;
773  			rx_bytes = s->rx_bytes;
774  		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
775  		data[0] += tx_packets;
776  		data[1] += tx_bytes;
777  		data[2] += rx_packets;
778  		data[3] += rx_bytes;
779  	}
780  	if (ds->ops->get_ethtool_stats)
781  		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
782  }
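/* The first four u64 slots are always the software tx/rx packet and byte
 * counters aggregated from the per-CPU tstats (the u64_stats retry loop
 * guards against torn reads on 32-bit); driver-provided hardware counters
 * follow at data[4]. get_strings(), get_ethtool_stats() and
 * get_sset_count() must all agree on this layout.
 */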
783  
784  static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
785  {
786  	struct dsa_port *dp = dsa_slave_to_port(dev);
787  	struct dsa_switch *ds = dp->ds;
788  
789  	if (sset == ETH_SS_STATS) {
790  		int count;
791  
792  		count = 4;
793  		if (ds->ops->get_sset_count)
794  			count += ds->ops->get_sset_count(ds, dp->index, sset);
795  
796  		return count;
797  	}
798  
799  	return -EOPNOTSUPP;
800  }
801  
802  static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
803  {
804  	struct dsa_port *dp = dsa_slave_to_port(dev);
805  	struct dsa_switch *ds = dp->ds;
806  
807  	phylink_ethtool_get_wol(dp->pl, w);
808  
809  	if (ds->ops->get_wol)
810  		ds->ops->get_wol(ds, dp->index, w);
811  }
812  
813  static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
814  {
815  	struct dsa_port *dp = dsa_slave_to_port(dev);
816  	struct dsa_switch *ds = dp->ds;
817  	int ret = -EOPNOTSUPP;
818  
819  	phylink_ethtool_set_wol(dp->pl, w);
820  
821  	if (ds->ops->set_wol)
822  		ret = ds->ops->set_wol(ds, dp->index, w);
823  
824  	return ret;
825  }
826  
827  static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
828  {
829  	struct dsa_port *dp = dsa_slave_to_port(dev);
830  	struct dsa_switch *ds = dp->ds;
831  	int ret;
832  
833  	/* Port's PHY and MAC both need to be EEE capable */
834  	if (!dev->phydev || !dp->pl)
835  		return -ENODEV;
836  
837  	if (!ds->ops->set_mac_eee)
838  		return -EOPNOTSUPP;
839  
840  	ret = ds->ops->set_mac_eee(ds, dp->index, e);
841  	if (ret)
842  		return ret;
843  
844  	return phylink_ethtool_set_eee(dp->pl, e);
845  }
846  
847  static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
848  {
849  	struct dsa_port *dp = dsa_slave_to_port(dev);
850  	struct dsa_switch *ds = dp->ds;
851  	int ret;
852  
853  	/* Port's PHY and MAC both need to be EEE capable */
854  	if (!dev->phydev || !dp->pl)
855  		return -ENODEV;
856  
857  	if (!ds->ops->get_mac_eee)
858  		return -EOPNOTSUPP;
859  
860  	ret = ds->ops->get_mac_eee(ds, dp->index, e);
861  	if (ret)
862  		return ret;
863  
864  	return phylink_ethtool_get_eee(dp->pl, e);
865  }
866  
867  static int dsa_slave_get_link_ksettings(struct net_device *dev,
868  					struct ethtool_link_ksettings *cmd)
869  {
870  	struct dsa_port *dp = dsa_slave_to_port(dev);
871  
872  	return phylink_ethtool_ksettings_get(dp->pl, cmd);
873  }
874  
875  static int dsa_slave_set_link_ksettings(struct net_device *dev,
876  					const struct ethtool_link_ksettings *cmd)
877  {
878  	struct dsa_port *dp = dsa_slave_to_port(dev);
879  
880  	return phylink_ethtool_ksettings_set(dp->pl, cmd);
881  }
882  
883  static void dsa_slave_get_pauseparam(struct net_device *dev,
884  				     struct ethtool_pauseparam *pause)
885  {
886  	struct dsa_port *dp = dsa_slave_to_port(dev);
887  
888  	phylink_ethtool_get_pauseparam(dp->pl, pause);
889  }
890  
891  static int dsa_slave_set_pauseparam(struct net_device *dev,
892  				    struct ethtool_pauseparam *pause)
893  {
894  	struct dsa_port *dp = dsa_slave_to_port(dev);
895  
896  	return phylink_ethtool_set_pauseparam(dp->pl, pause);
897  }
898  
899  #ifdef CONFIG_NET_POLL_CONTROLLER
900  static int dsa_slave_netpoll_setup(struct net_device *dev,
901  				   struct netpoll_info *ni)
902  {
903  	struct net_device *master = dsa_slave_to_master(dev);
904  	struct dsa_slave_priv *p = netdev_priv(dev);
905  	struct netpoll *netpoll;
906  	int err = 0;
907  
908  	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
909  	if (!netpoll)
910  		return -ENOMEM;
911  
912  	err = __netpoll_setup(netpoll, master);
913  	if (err) {
914  		kfree(netpoll);
915  		goto out;
916  	}
917  
918  	p->netpoll = netpoll;
919  out:
920  	return err;
921  }
922  
923  static void dsa_slave_netpoll_cleanup(struct net_device *dev)
924  {
925  	struct dsa_slave_priv *p = netdev_priv(dev);
926  	struct netpoll *netpoll = p->netpoll;
927  
928  	if (!netpoll)
929  		return;
930  
931  	p->netpoll = NULL;
932  
933  	__netpoll_free(netpoll);
934  }
935  
936  static void dsa_slave_poll_controller(struct net_device *dev)
937  {
938  }
939  #endif
940  
941  static int dsa_slave_get_phys_port_name(struct net_device *dev,
942  					char *name, size_t len)
943  {
944  	struct dsa_port *dp = dsa_slave_to_port(dev);
945  
946  	/* For non-legacy ports, devlink is used and it takes
947  	 * care of the name generation. This ndo implementation
948  	 * should be removed with legacy support.
949  	 */
950  	if (dp->ds->devlink)
951  		return -EOPNOTSUPP;
952  
953  	if (snprintf(name, len, "p%d", dp->index) >= len)
954  		return -EINVAL;
955  
956  	return 0;
957  }
958  
959  static struct dsa_mall_tc_entry *
960  dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
961  {
962  	struct dsa_slave_priv *p = netdev_priv(dev);
963  	struct dsa_mall_tc_entry *mall_tc_entry;
964  
965  	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
966  		if (mall_tc_entry->cookie == cookie)
967  			return mall_tc_entry;
968  
969  	return NULL;
970  }
971  
972  static int
973  dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
974  				  struct tc_cls_matchall_offload *cls,
975  				  bool ingress)
976  {
977  	struct dsa_port *dp = dsa_slave_to_port(dev);
978  	struct dsa_slave_priv *p = netdev_priv(dev);
979  	struct dsa_mall_mirror_tc_entry *mirror;
980  	struct dsa_mall_tc_entry *mall_tc_entry;
981  	struct dsa_switch *ds = dp->ds;
982  	struct flow_action_entry *act;
983  	struct dsa_port *to_dp;
984  	int err;
985  
986  	if (!ds->ops->port_mirror_add)
987  		return -EOPNOTSUPP;
988  
989  	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
990  					      cls->common.extack))
991  		return -EOPNOTSUPP;
992  
993  	act = &cls->rule->action.entries[0];
994  
995  	if (!act->dev)
996  		return -EINVAL;
997  
998  	if (!dsa_slave_dev_check(act->dev))
999  		return -EOPNOTSUPP;
1000  
1001  	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1002  	if (!mall_tc_entry)
1003  		return -ENOMEM;
1004  
1005  	mall_tc_entry->cookie = cls->cookie;
1006  	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
1007  	mirror = &mall_tc_entry->mirror;
1008  
1009  	to_dp = dsa_slave_to_port(act->dev);
1010  
1011  	mirror->to_local_port = to_dp->index;
1012  	mirror->ingress = ingress;
1013  
1014  	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
1015  	if (err) {
1016  		kfree(mall_tc_entry);
1017  		return err;
1018  	}
1019  
1020  	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
1021  
1022  	return err;
1023  }
1024  
1025  static int
1026  dsa_slave_add_cls_matchall_police(struct net_device *dev,
1027  				  struct tc_cls_matchall_offload *cls,
1028  				  bool ingress)
1029  {
1030  	struct netlink_ext_ack *extack = cls->common.extack;
1031  	struct dsa_port *dp = dsa_slave_to_port(dev);
1032  	struct dsa_slave_priv *p = netdev_priv(dev);
1033  	struct dsa_mall_policer_tc_entry *policer;
1034  	struct dsa_mall_tc_entry *mall_tc_entry;
1035  	struct dsa_switch *ds = dp->ds;
1036  	struct flow_action_entry *act;
1037  	int err;
1038  
1039  	if (!ds->ops->port_policer_add) {
1040  		NL_SET_ERR_MSG_MOD(extack,
1041  				   "Policing offload not implemented");
1042  		return -EOPNOTSUPP;
1043  	}
1044  
1045  	if (!ingress) {
1046  		NL_SET_ERR_MSG_MOD(extack,
1047  				   "Only supported on ingress qdisc");
1048  		return -EOPNOTSUPP;
1049  	}
1050  
1051  	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
1052  					      cls->common.extack))
1053  		return -EOPNOTSUPP;
1054  
1055  	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
1056  		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
1057  			NL_SET_ERR_MSG_MOD(extack,
1058  					   "Only one port policer allowed");
1059  			return -EEXIST;
1060  		}
1061  	}
1062  
1063  	act = &cls->rule->action.entries[0];
1064  
1065  	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1066  	if (!mall_tc_entry)
1067  		return -ENOMEM;
1068  
1069  	mall_tc_entry->cookie = cls->cookie;
1070  	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
1071  	policer = &mall_tc_entry->policer;
1072  	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
1073  	policer->burst = act->police.burst;
1074  
1075  	err = ds->ops->port_policer_add(ds, dp->index, policer);
1076  	if (err) {
1077  		kfree(mall_tc_entry);
1078  		return err;
1079  	}
1080  
1081  	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
1082  
1083  	return err;
1084  }
1085  
1086  static int dsa_slave_add_cls_matchall(struct net_device *dev,
1087  				      struct tc_cls_matchall_offload *cls,
1088  				      bool ingress)
1089  {
1090  	int err = -EOPNOTSUPP;
1091  
1092  	if (cls->common.protocol == htons(ETH_P_ALL) &&
1093  	    flow_offload_has_one_action(&cls->rule->action) &&
1094  	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
1095  		err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
1096  	else if (flow_offload_has_one_action(&cls->rule->action) &&
1097  		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
1098  		err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);
1099  
1100  	return err;
1101  }
1102  
1103  static void dsa_slave_del_cls_matchall(struct net_device *dev,
1104  				       struct tc_cls_matchall_offload *cls)
1105  {
1106  	struct dsa_port *dp = dsa_slave_to_port(dev);
1107  	struct dsa_mall_tc_entry *mall_tc_entry;
1108  	struct dsa_switch *ds = dp->ds;
1109  
1110  	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
1111  	if (!mall_tc_entry)
1112  		return;
1113  
1114  	list_del(&mall_tc_entry->list);
1115  
1116  	switch (mall_tc_entry->type) {
1117  	case DSA_PORT_MALL_MIRROR:
1118  		if (ds->ops->port_mirror_del)
1119  			ds->ops->port_mirror_del(ds, dp->index,
1120  						 &mall_tc_entry->mirror);
1121  		break;
1122  	case DSA_PORT_MALL_POLICER:
1123  		if (ds->ops->port_policer_del)
1124  			ds->ops->port_policer_del(ds, dp->index);
1125  		break;
1126  	default:
1127  		WARN_ON(1);
1128  	}
1129  
1130  	kfree(mall_tc_entry);
1131  }
1132  
1133  static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
1134  					   struct tc_cls_matchall_offload *cls,
1135  					   bool ingress)
1136  {
1137  	if (cls->common.chain_index)
1138  		return -EOPNOTSUPP;
1139  
1140  	switch (cls->command) {
1141  	case TC_CLSMATCHALL_REPLACE:
1142  		return dsa_slave_add_cls_matchall(dev, cls, ingress);
1143  	case TC_CLSMATCHALL_DESTROY:
1144  		dsa_slave_del_cls_matchall(dev, cls);
1145  		return 0;
1146  	default:
1147  		return -EOPNOTSUPP;
1148  	}
1149  }
1150  
1151  static int dsa_slave_add_cls_flower(struct net_device *dev,
1152  				    struct flow_cls_offload *cls,
1153  				    bool ingress)
1154  {
1155  	struct dsa_port *dp = dsa_slave_to_port(dev);
1156  	struct dsa_switch *ds = dp->ds;
1157  	int port = dp->index;
1158  
1159  	if (!ds->ops->cls_flower_add)
1160  		return -EOPNOTSUPP;
1161  
1162  	return ds->ops->cls_flower_add(ds, port, cls, ingress);
1163  }
1164  
1165  static int dsa_slave_del_cls_flower(struct net_device *dev,
1166  				    struct flow_cls_offload *cls,
1167  				    bool ingress)
1168  {
1169  	struct dsa_port *dp = dsa_slave_to_port(dev);
1170  	struct dsa_switch *ds = dp->ds;
1171  	int port = dp->index;
1172  
1173  	if (!ds->ops->cls_flower_del)
1174  		return -EOPNOTSUPP;
1175  
1176  	return ds->ops->cls_flower_del(ds, port, cls, ingress);
1177  }
1178  
1179  static int dsa_slave_stats_cls_flower(struct net_device *dev,
1180  				      struct flow_cls_offload *cls,
1181  				      bool ingress)
1182  {
1183  	struct dsa_port *dp = dsa_slave_to_port(dev);
1184  	struct dsa_switch *ds = dp->ds;
1185  	int port = dp->index;
1186  
1187  	if (!ds->ops->cls_flower_stats)
1188  		return -EOPNOTSUPP;
1189  
1190  	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
1191  }
1192  
1193  static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
1194  					 struct flow_cls_offload *cls,
1195  					 bool ingress)
1196  {
1197  	switch (cls->command) {
1198  	case FLOW_CLS_REPLACE:
1199  		return dsa_slave_add_cls_flower(dev, cls, ingress);
1200  	case FLOW_CLS_DESTROY:
1201  		return dsa_slave_del_cls_flower(dev, cls, ingress);
1202  	case FLOW_CLS_STATS:
1203  		return dsa_slave_stats_cls_flower(dev, cls, ingress);
1204  	default:
1205  		return -EOPNOTSUPP;
1206  	}
1207  }
1208  
1209  static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1210  				       void *cb_priv, bool ingress)
1211  {
1212  	struct net_device *dev = cb_priv;
1213  
1214  	if (!tc_can_offload(dev))
1215  		return -EOPNOTSUPP;
1216  
1217  	switch (type) {
1218  	case TC_SETUP_CLSMATCHALL:
1219  		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
1220  	case TC_SETUP_CLSFLOWER:
1221  		return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
1222  	default:
1223  		return -EOPNOTSUPP;
1224  	}
1225  }
1226  
1227  static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
1228  					  void *type_data, void *cb_priv)
1229  {
1230  	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
1231  }
1232  
1233  static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
1234  					  void *type_data, void *cb_priv)
1235  {
1236  	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
1237  }
1238  
1239  static LIST_HEAD(dsa_slave_block_cb_list);
1240  
1241  static int dsa_slave_setup_tc_block(struct net_device *dev,
1242  				    struct flow_block_offload *f)
1243  {
1244  	struct flow_block_cb *block_cb;
1245  	flow_setup_cb_t *cb;
1246  
1247  	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1248  		cb = dsa_slave_setup_tc_block_cb_ig;
1249  	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1250  		cb = dsa_slave_setup_tc_block_cb_eg;
1251  	else
1252  		return -EOPNOTSUPP;
1253  
1254  	f->driver_block_list = &dsa_slave_block_cb_list;
1255  
1256  	switch (f->command) {
1257  	case FLOW_BLOCK_BIND:
1258  		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
1259  			return -EBUSY;
1260  
1261  		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
1262  		if (IS_ERR(block_cb))
1263  			return PTR_ERR(block_cb);
1264  
1265  		flow_block_cb_add(block_cb, f);
1266  		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
1267  		return 0;
1268  	case FLOW_BLOCK_UNBIND:
1269  		block_cb = flow_block_cb_lookup(f->block, cb, dev);
1270  		if (!block_cb)
1271  			return -ENOENT;
1272  
1273  		flow_block_cb_remove(block_cb, f);
1274  		list_del(&block_cb->driver_list);
1275  		return 0;
1276  	default:
1277  		return -EOPNOTSUPP;
1278  	}
1279  }
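/* tc offload plumbing: one shared flow_block callback is registered per
 * direction, the ingress/egress wrappers differing only in the bool they
 * pass down. dsa_slave_block_cb_list is what lets flow_block_cb_is_busy()
 * reject binding the same callback to the same device twice.
 */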
1280  
1281  static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
1282  			      void *type_data)
1283  {
1284  	struct dsa_port *dp = dsa_slave_to_port(dev);
1285  	struct dsa_switch *ds = dp->ds;
1286  
1287  	if (type == TC_SETUP_BLOCK)
1288  		return dsa_slave_setup_tc_block(dev, type_data);
1289  
1290  	if (!ds->ops->port_setup_tc)
1291  		return -EOPNOTSUPP;
1292  
1293  	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
1294  }
1295  
1296  static int dsa_slave_get_rxnfc(struct net_device *dev,
1297  			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
1298  {
1299  	struct dsa_port *dp = dsa_slave_to_port(dev);
1300  	struct dsa_switch *ds = dp->ds;
1301  
1302  	if (!ds->ops->get_rxnfc)
1303  		return -EOPNOTSUPP;
1304  
1305  	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
1306  }
1307  
1308  static int dsa_slave_set_rxnfc(struct net_device *dev,
1309  			       struct ethtool_rxnfc *nfc)
1310  {
1311  	struct dsa_port *dp = dsa_slave_to_port(dev);
1312  	struct dsa_switch *ds = dp->ds;
1313  
1314  	if (!ds->ops->set_rxnfc)
1315  		return -EOPNOTSUPP;
1316  
1317  	return ds->ops->set_rxnfc(ds, dp->index, nfc);
1318  }
1319  
1320  static int dsa_slave_get_ts_info(struct net_device *dev,
1321  				 struct ethtool_ts_info *ts)
1322  {
1323  	struct dsa_slave_priv *p = netdev_priv(dev);
1324  	struct dsa_switch *ds = p->dp->ds;
1325  
1326  	if (!ds->ops->get_ts_info)
1327  		return -EOPNOTSUPP;
1328  
1329  	return ds->ops->get_ts_info(ds, p->dp->index, ts);
1330  }
1331  
1332  static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
1333  				     u16 vid)
1334  {
1335  	struct net_device *master = dsa_slave_to_master(dev);
1336  	struct dsa_port *dp = dsa_slave_to_port(dev);
1337  	struct switchdev_obj_port_vlan vlan = {
1338  		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
1339  		.vid = vid,
1340  		/* This API only allows programming tagged, non-PVID VIDs */
1341  		.flags = 0,
1342  	};
1343  	struct netlink_ext_ack extack = {0};
1344  	int ret;
1345  
1346  	/* User port... */
1347  	ret = dsa_port_vlan_add(dp, &vlan, &extack);
1348  	if (ret) {
1349  		if (extack._msg)
1350  			netdev_err(dev, "%s\n", extack._msg);
1351  		return ret;
1352  	}
1353  
1354  	/* And CPU port... */
1355  	ret = dsa_port_vlan_add(dp->cpu_dp, &vlan, &extack);
1356  	if (ret) {
1357  		if (extack._msg)
1358  			netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
1359  				   extack._msg);
1360  		return ret;
1361  	}
1362  
1363  	return vlan_vid_add(master, proto, vid);
1364  }
1365  
1366  static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
1367  				      u16 vid)
1368  {
1369  	struct net_device *master = dsa_slave_to_master(dev);
1370  	struct dsa_port *dp = dsa_slave_to_port(dev);
1371  	struct switchdev_obj_port_vlan vlan = {
1372  		.vid = vid,
1373  		/* This API only allows programming tagged, non-PVID VIDs */
1374  		.flags = 0,
1375  	};
1376  	int err;
1377  
1378  	/* Do not deprogram the CPU port as it may be shared with other user
1379  	 * ports which can be members of this VLAN as well.
1380  	 */
1381  	err = dsa_port_vlan_del(dp, &vlan);
1382  	if (err)
1383  		return err;
1384  
1385  	vlan_vid_del(master, proto, vid);
1386  
1387  	return 0;
1388  }
1389  
1390  struct dsa_hw_port {
1391  	struct list_head list;
1392  	struct net_device *dev;
1393  	int old_mtu;
1394  };
1395  
1396  static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
1397  {
1398  	const struct dsa_hw_port *p;
1399  	int err;
1400  
1401  	list_for_each_entry(p, hw_port_list, list) {
1402  		if (p->dev->mtu == mtu)
1403  			continue;
1404  
1405  		err = dev_set_mtu(p->dev, mtu);
1406  		if (err)
1407  			goto rollback;
1408  	}
1409  
1410  	return 0;
1411  
1412  rollback:
1413  	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
1414  		if (p->dev->mtu == p->old_mtu)
1415  			continue;
1416  
1417  		if (dev_set_mtu(p->dev, p->old_mtu))
1418  			netdev_err(p->dev, "Failed to restore MTU\n");
1419  	}
1420  
1421  	return err;
1422  }
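/* If any port rejects the new MTU, list_for_each_entry_continue_reverse()
 * walks back over the ports already changed and restores their old MTU,
 * so the hardware bridge is left either fully converted or untouched.
 */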
1423  
1424  static void dsa_hw_port_list_free(struct list_head *hw_port_list)
1425  {
1426  	struct dsa_hw_port *p, *n;
1427  
1428  	list_for_each_entry_safe(p, n, hw_port_list, list)
1429  		kfree(p);
1430  }
1431  
1432  /* Make the hardware datapath to/from @dev limited to a common MTU */
1433  static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
1434  {
1435  	struct list_head hw_port_list;
1436  	struct dsa_switch_tree *dst;
1437  	int min_mtu = ETH_MAX_MTU;
1438  	struct dsa_port *other_dp;
1439  	int err;
1440  
1441  	if (!dp->ds->mtu_enforcement_ingress)
1442  		return;
1443  
1444  	if (!dp->bridge_dev)
1445  		return;
1446  
1447  	INIT_LIST_HEAD(&hw_port_list);
1448  
1449  	/* Populate the list of ports that are part of the same bridge
1450  	 * as the newly added/modified port
1451  	 */
1452  	list_for_each_entry(dst, &dsa_tree_list, list) {
1453  		list_for_each_entry(other_dp, &dst->ports, list) {
1454  			struct dsa_hw_port *hw_port;
1455  			struct net_device *slave;
1456  
1457  			if (other_dp->type != DSA_PORT_TYPE_USER)
1458  				continue;
1459  
1460  			if (other_dp->bridge_dev != dp->bridge_dev)
1461  				continue;
1462  
1463  			if (!other_dp->ds->mtu_enforcement_ingress)
1464  				continue;
1465  
1466  			slave = other_dp->slave;
1467  
1468  			if (min_mtu > slave->mtu)
1469  				min_mtu = slave->mtu;
1470  
1471  			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
1472  			if (!hw_port)
1473  				goto out;
1474  
1475  			hw_port->dev = slave;
1476  			hw_port->old_mtu = slave->mtu;
1477  
1478  			list_add(&hw_port->list, &hw_port_list);
1479  		}
1480  	}
1481  
1482  	/* Attempt to configure the entire hardware bridge to the newly added
1483  	 * interface's MTU first, regardless of whether the intention of the
1484  	 * user was to raise or lower it.
1485  	 */
1486  	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
1487  	if (!err)
1488  		goto out;
1489  
1490  	/* Clearly that didn't work out so well, so just set the minimum MTU on
1491  	 * all hardware bridge ports now. If this fails too, then all ports will
1492  	 * still have their old MTU rolled back anyway.
1493  	 */
1494  	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);
1495  
1496  out:
1497  	dsa_hw_port_list_free(&hw_port_list);
1498  }
1499  
1500  int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
1501  {
1502  	struct net_device *master = dsa_slave_to_master(dev);
1503  	struct dsa_port *dp = dsa_slave_to_port(dev);
1504  	struct dsa_slave_priv *p = netdev_priv(dev);
1505  	struct dsa_switch *ds = p->dp->ds;
1506  	struct dsa_port *cpu_dp;
1507  	int port = p->dp->index;
1508  	int largest_mtu = 0;
1509  	int new_master_mtu;
1510  	int old_master_mtu;
1511  	int mtu_limit;
1512  	int cpu_mtu;
1513  	int err, i;
1514  
1515  	if (!ds->ops->port_change_mtu)
1516  		return -EOPNOTSUPP;
1517  
1518  	for (i = 0; i < ds->num_ports; i++) {
1519  		int slave_mtu;
1520  
1521  		if (!dsa_is_user_port(ds, i))
1522  			continue;
1523  
1524  		/* During probe, this function will be called for each slave
1525  		 * device, while not all of them have been allocated. That's
1526  		 * ok, it doesn't change what the maximum is, so ignore it.
1527  		 */
1528  		if (!dsa_to_port(ds, i)->slave)
1529  			continue;
1530  
1531  		/* Pretend that we already applied the setting, which we
1532  		 * actually haven't (still haven't done all integrity checks)
1533  		 */
1534  		if (i == port)
1535  			slave_mtu = new_mtu;
1536  		else
1537  			slave_mtu = dsa_to_port(ds, i)->slave->mtu;
1538  
1539  		if (largest_mtu < slave_mtu)
1540  			largest_mtu = slave_mtu;
1541  	}
1542  
1543  	cpu_dp = dsa_to_port(ds, port)->cpu_dp;
1544  
1545  	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
1546  	old_master_mtu = master->mtu;
1547  	new_master_mtu = largest_mtu + cpu_dp->tag_ops->overhead;
1548  	if (new_master_mtu > mtu_limit)
1549  		return -ERANGE;
1550  
1551  	/* If the master MTU isn't over limit, there's no need to check the CPU
1552  	 * MTU, since that surely isn't either.
1553  	 */
1554  	cpu_mtu = largest_mtu;
1555  
1556  	/* Start applying stuff */
1557  	if (new_master_mtu != old_master_mtu) {
1558  		err = dev_set_mtu(master, new_master_mtu);
1559  		if (err < 0)
1560  			goto out_master_failed;
1561  
1562  		/* We only need to propagate the MTU of the CPU port to
1563  		 * upstream switches.
1564  		 */
1565  		err = dsa_port_mtu_change(cpu_dp, cpu_mtu, true);
1566  		if (err)
1567  			goto out_cpu_failed;
1568  	}
1569  
1570  	err = dsa_port_mtu_change(dp, new_mtu, false);
1571  	if (err)
1572  		goto out_port_failed;
1573  
1574  	dev->mtu = new_mtu;
1575  
1576  	dsa_bridge_mtu_normalization(dp);
1577  
1578  	return 0;
1579  
1580  out_port_failed:
1581  	if (new_master_mtu != old_master_mtu)
1582  		dsa_port_mtu_change(cpu_dp, old_master_mtu -
1583  				    cpu_dp->tag_ops->overhead,
1584  				    true);
1585  out_cpu_failed:
1586  	if (new_master_mtu != old_master_mtu)
1587  		dev_set_mtu(master, old_master_mtu);
1588  out_master_failed:
1589  	return err;
1590  }
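/* The arithmetic above: the master must fit the largest slave MTU plus
 * the tagging protocol's overhead. For example (illustrative, overhead
 * varies per tagger), with 8 bytes of tag overhead and all slaves at
 * 1500, the master MTU becomes 1508, while the CPU port itself is
 * programmed with the untagged payload size (largest_mtu).
 */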
1591  
1592  static const struct ethtool_ops dsa_slave_ethtool_ops = {
1593  	.get_drvinfo		= dsa_slave_get_drvinfo,
1594  	.get_regs_len		= dsa_slave_get_regs_len,
1595  	.get_regs		= dsa_slave_get_regs,
1596  	.nway_reset		= dsa_slave_nway_reset,
1597  	.get_link		= ethtool_op_get_link,
1598  	.get_eeprom_len		= dsa_slave_get_eeprom_len,
1599  	.get_eeprom		= dsa_slave_get_eeprom,
1600  	.set_eeprom		= dsa_slave_set_eeprom,
1601  	.get_strings		= dsa_slave_get_strings,
1602  	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
1603  	.get_sset_count		= dsa_slave_get_sset_count,
1604  	.set_wol		= dsa_slave_set_wol,
1605  	.get_wol		= dsa_slave_get_wol,
1606  	.set_eee		= dsa_slave_set_eee,
1607  	.get_eee		= dsa_slave_get_eee,
1608  	.get_link_ksettings	= dsa_slave_get_link_ksettings,
1609  	.set_link_ksettings	= dsa_slave_set_link_ksettings,
1610  	.get_pauseparam		= dsa_slave_get_pauseparam,
1611  	.set_pauseparam		= dsa_slave_set_pauseparam,
1612  	.get_rxnfc		= dsa_slave_get_rxnfc,
1613  	.set_rxnfc		= dsa_slave_set_rxnfc,
1614  	.get_ts_info		= dsa_slave_get_ts_info,
1615  };
1616  
1617  /* legacy way, bypassing the bridge *****************************************/
1618  static int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
1619  			      struct net_device *dev,
1620  			      const unsigned char *addr, u16 vid,
1621  			      u16 flags,
1622  			      struct netlink_ext_ack *extack)
1623  {
1624  	struct dsa_port *dp = dsa_slave_to_port(dev);
1625  
1626  	return dsa_port_fdb_add(dp, addr, vid);
1627  }
1628  
1629  static int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
1630  			      struct net_device *dev,
1631  			      const unsigned char *addr, u16 vid)
1632  {
1633  	struct dsa_port *dp = dsa_slave_to_port(dev);
1634  
1635  	return dsa_port_fdb_del(dp, addr, vid);
1636  }
1637  
1638  static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
1639  {
1640  	struct dsa_port *dp = dsa_slave_to_port(dev);
1641  
1642  	return dp->ds->devlink ? &dp->devlink_port : NULL;
1643  }
1644  
1645  static void dsa_slave_get_stats64(struct net_device *dev,
1646  				  struct rtnl_link_stats64 *s)
1647  {
1648  	struct dsa_port *dp = dsa_slave_to_port(dev);
1649  	struct dsa_switch *ds = dp->ds;
1650  
1651  	if (ds->ops->get_stats64)
1652  		ds->ops->get_stats64(ds, dp->index, s);
1653  	else
1654  		dev_get_tstats64(dev, s);
1655  }
1656  
1657  static const struct net_device_ops dsa_slave_netdev_ops = {
1658  	.ndo_open	 	= dsa_slave_open,
1659  	.ndo_stop		= dsa_slave_close,
1660  	.ndo_start_xmit		= dsa_slave_xmit,
1661  	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
1662  	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
1663  	.ndo_set_mac_address	= dsa_slave_set_mac_address,
1664  	.ndo_fdb_add		= dsa_legacy_fdb_add,
1665  	.ndo_fdb_del		= dsa_legacy_fdb_del,
1666  	.ndo_fdb_dump		= dsa_slave_fdb_dump,
1667  	.ndo_do_ioctl		= dsa_slave_ioctl,
1668  	.ndo_get_iflink		= dsa_slave_get_iflink,
1669  #ifdef CONFIG_NET_POLL_CONTROLLER
1670  	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
1671  	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
1672  	.ndo_poll_controller	= dsa_slave_poll_controller,
1673  #endif
1674  	.ndo_get_phys_port_name	= dsa_slave_get_phys_port_name,
1675  	.ndo_setup_tc		= dsa_slave_setup_tc,
1676  	.ndo_get_stats64	= dsa_slave_get_stats64,
1677  	.ndo_get_port_parent_id	= dsa_slave_get_port_parent_id,
1678  	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
1679  	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
1680  	.ndo_get_devlink_port	= dsa_slave_get_devlink_port,
1681  	.ndo_change_mtu		= dsa_slave_change_mtu,
1682  };
1683  
1684  static struct device_type dsa_type = {
1685  	.name	= "dsa",
1686  };
1687  
1688  void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
1689  {
1690  	const struct dsa_port *dp = dsa_to_port(ds, port);
1691  
1692  	if (dp->pl)
1693  		phylink_mac_change(dp->pl, up);
1694  }
1695  EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
1696  
1697  static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
1698  					  struct phylink_link_state *state)
1699  {
1700  	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1701  	struct dsa_switch *ds = dp->ds;
1702  
1703  	/* No need to check that this operation is valid, the callback would
1704  	 * not be called if it was not.
1705  	 */
1706  	ds->ops->phylink_fixed_state(ds, dp->index, state);
1707  }
1708  
1709  /* slave device setup *******************************************************/
1710  static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
1711  {
1712  	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1713  	struct dsa_switch *ds = dp->ds;
1714  
1715  	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
1716  	if (!slave_dev->phydev) {
1717  		netdev_err(slave_dev, "no phy at %d\n", addr);
1718  		return -ENODEV;
1719  	}
1720  
1721  	return phylink_connect_phy(dp->pl, slave_dev->phydev);
1722  }
1723  
1724  static int dsa_slave_phy_setup(struct net_device *slave_dev)
1725  {
1726  	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1727  	struct device_node *port_dn = dp->dn;
1728  	struct dsa_switch *ds = dp->ds;
1729  	phy_interface_t mode;
1730  	u32 phy_flags = 0;
1731  	int ret;
1732  
1733  	ret = of_get_phy_mode(port_dn, &mode);
1734  	if (ret)
1735  		mode = PHY_INTERFACE_MODE_NA;
1736  
1737  	dp->pl_config.dev = &slave_dev->dev;
1738  	dp->pl_config.type = PHYLINK_NETDEV;
1739  
1740  	/* The get_fixed_state callback takes precedence over polling the
1741  	 * link GPIO in PHYLINK (see phylink_get_fixed_state).  Only set
1742  	 * this if the switch provides such a callback.
1743  	 */
1744  	if (ds->ops->phylink_fixed_state) {
1745  		dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
1746  		dp->pl_config.poll_fixed_state = true;
1747  	}
1748  
1749  	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode,
1750  				&dsa_port_phylink_mac_ops);
1751  	if (IS_ERR(dp->pl)) {
1752  		netdev_err(slave_dev,
1753  			   "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
1754  		return PTR_ERR(dp->pl);
1755  	}
1756  
1757  	if (ds->ops->get_phy_flags)
1758  		phy_flags = ds->ops->get_phy_flags(ds, dp->index);
1759  
1760  	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
1761  	if (ret == -ENODEV && ds->slave_mii_bus) {
1762  		/* We could not connect to a designated PHY or SFP, so try to
1763  		 * use the switch internal MDIO bus instead
1764  		 */
1765  		ret = dsa_slave_phy_connect(slave_dev, dp->index);
1766  		if (ret) {
1767  			netdev_err(slave_dev,
1768  				   "failed to connect to port %d: %d\n",
1769  				   dp->index, ret);
1770  			phylink_destroy(dp->pl);
1771  			return ret;
1772  		}
1773  	}
1774  
1775  	return ret;
1776  }
1777  
1778  void dsa_slave_setup_tagger(struct net_device *slave)
1779  {
1780  	struct dsa_port *dp = dsa_slave_to_port(slave);
1781  	struct dsa_slave_priv *p = netdev_priv(slave);
1782  	const struct dsa_port *cpu_dp = dp->cpu_dp;
1783  	struct net_device *master = cpu_dp->master;
1784  
1785  	if (cpu_dp->tag_ops->tail_tag)
1786  		slave->needed_tailroom = cpu_dp->tag_ops->overhead;
1787  	else
1788  		slave->needed_headroom = cpu_dp->tag_ops->overhead;
1789  	/* Try to save one extra realloc later in the TX path (in the master)
1790  	 * by also inheriting the master's needed headroom and tailroom.
1791  	 * The 8021q driver also does this.
1792  	 */
1793  	slave->needed_headroom += master->needed_headroom;
1794  	slave->needed_tailroom += master->needed_tailroom;
1795  
1796  	p->xmit = cpu_dp->tag_ops->xmit;
1797  }
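/* Tail taggers reserve the tag space at the end of the frame, all others
 * at the front; on top of that the slave inherits the master's own
 * headroom/tailroom needs, so a well-built skb never has to be
 * reallocated twice on its way out (once here, once in the master).
 */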
1798  
1799  static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
1800  static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
1801  					    struct netdev_queue *txq,
1802  					    void *_unused)
1803  {
1804  	lockdep_set_class(&txq->_xmit_lock,
1805  			  &dsa_slave_netdev_xmit_lock_key);
1806  }
1807  
1808  int dsa_slave_suspend(struct net_device *slave_dev)
1809  {
1810  	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1811  
1812  	if (!netif_running(slave_dev))
1813  		return 0;
1814  
1815  	netif_device_detach(slave_dev);
1816  
1817  	rtnl_lock();
1818  	phylink_stop(dp->pl);
1819  	rtnl_unlock();
1820  
1821  	return 0;
1822  }
1823  
1824  int dsa_slave_resume(struct net_device *slave_dev)
1825  {
1826  	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1827  
1828  	if (!netif_running(slave_dev))
1829  		return 0;
1830  
1831  	netif_device_attach(slave_dev);
1832  
1833  	rtnl_lock();
1834  	phylink_start(dp->pl);
1835  	rtnl_unlock();
1836  
1837  	return 0;
1838  }
1839  
int dsa_slave_create(struct dsa_port *port)
{
	const struct dsa_port *cpu_dp = port->cpu_dp;
	struct net_device *master = cpu_dp->master;
	struct dsa_switch *ds = port->ds;
	const char *name = port->name;
	struct net_device *slave_dev;
	struct dsa_slave_priv *p;
	int ret;

	if (!ds->num_tx_queues)
		ds->num_tx_queues = 1;

	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
				     NET_NAME_UNKNOWN, ether_setup,
				     ds->num_tx_queues, 1);
	if (slave_dev == NULL)
		return -ENOMEM;

	slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
	if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
		slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	slave_dev->hw_features |= NETIF_F_HW_TC;
	slave_dev->features |= NETIF_F_LLTX;
	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
	if (!IS_ERR_OR_NULL(port->mac))
		ether_addr_copy(slave_dev->dev_addr, port->mac);
	else
		eth_hw_addr_inherit(slave_dev, master);
	slave_dev->priv_flags |= IFF_NO_QUEUE;
	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
	if (ds->ops->port_max_mtu)
		slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);

	netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
				 NULL);

	SET_NETDEV_DEV(slave_dev, port->ds->dev);
	slave_dev->dev.of_node = port->dn;
	slave_dev->vlan_features = master->vlan_features;

	p = netdev_priv(slave_dev);
	slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!slave_dev->tstats) {
		free_netdev(slave_dev);
		return -ENOMEM;
	}

	ret = gro_cells_init(&p->gcells, slave_dev);
	if (ret)
		goto out_free;

	p->dp = port;
	INIT_LIST_HEAD(&p->mall_tc_list);
	port->slave = slave_dev;
	dsa_slave_setup_tagger(slave_dev);

	rtnl_lock();
	ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
	rtnl_unlock();
	if (ret && ret != -EOPNOTSUPP)
		dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
			 ret, ETH_DATA_LEN, port->index);

	netif_carrier_off(slave_dev);

	ret = dsa_slave_phy_setup(slave_dev);
	if (ret) {
		netdev_err(slave_dev,
			   "error %d setting up PHY for tree %d, switch %d, port %d\n",
			   ret, ds->dst->index, ds->index, port->index);
		goto out_gcells;
	}

	rtnl_lock();

	ret = register_netdevice(slave_dev);
	if (ret) {
		netdev_err(master, "error %d registering interface %s\n",
			   ret, slave_dev->name);
		rtnl_unlock();
		goto out_phy;
	}

	ret = netdev_upper_dev_link(master, slave_dev, NULL);

	rtnl_unlock();

	if (ret)
		goto out_unregister;

	return 0;

out_unregister:
	unregister_netdev(slave_dev);
out_phy:
	rtnl_lock();
	phylink_disconnect_phy(p->dp->pl);
	rtnl_unlock();
	phylink_destroy(p->dp->pl);
out_gcells:
	gro_cells_destroy(&p->gcells);
out_free:
	free_percpu(slave_dev->tstats);
	free_netdev(slave_dev);
	port->slave = NULL;
	return ret;
}

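/* Tear a slave netdevice down again, undoing dsa_slave_create() in
 * reverse order.
 */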
void dsa_slave_destroy(struct net_device *slave_dev)
{
	struct net_device *master = dsa_slave_to_master(slave_dev);
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);

	netif_carrier_off(slave_dev);
	rtnl_lock();
	netdev_upper_dev_unlink(master, slave_dev);
	unregister_netdevice(slave_dev);
	phylink_disconnect_phy(dp->pl);
	rtnl_unlock();

	phylink_destroy(dp->pl);
	gro_cells_destroy(&p->gcells);
	free_percpu(slave_dev->tstats);
	free_netdev(slave_dev);
}

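/* Test whether a netdevice is a DSA user port, based on its netdev_ops.
 * Exported so that notifier handlers elsewhere can filter devices early,
 * typically as:
 *
 *	if (!dsa_slave_dev_check(dev))
 *		return NOTIFY_DONE;
 */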
bool dsa_slave_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &dsa_slave_netdev_ops;
}
EXPORT_SYMBOL_GPL(dsa_slave_dev_check);

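/* Handle this port being linked to or unlinked from a bridge, LAG or HSR
 * upper device. For LAG and HSR, -EOPNOTSUPP from the driver is not
 * fatal: the upper keeps working in software, and the limitation is only
 * reported through extack.
 */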
static int dsa_slave_changeupper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err = NOTIFY_DONE;

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_bridge_join(dp, info->upper_dev);
			if (!err)
				dsa_bridge_mtu_normalization(dp);
			err = notifier_from_errno(err);
		} else {
			dsa_port_bridge_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	} else if (netif_is_lag_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_lag_join(dp, info->upper_dev,
						info->upper_info);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_MOD(info->info.extack,
						   "Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_lag_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	} else if (is_hsr_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_hsr_join(dp, info->upper_dev);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_MOD(info->info.extack,
						   "Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_hsr_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	}

	return err;
}

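/* A CHANGEUPPER event on a LAG master (e.g. the LAG joining a bridge) is
 * replayed here towards every DSA port that offloads the LAG; ports on a
 * purely software LAG are skipped.
 */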
static int
dsa_slave_lag_changeupper(struct net_device *dev,
			  struct netdev_notifier_changeupper_info *info)
{
	struct net_device *lower;
	struct list_head *iter;
	int err = NOTIFY_DONE;
	struct dsa_port *dp;

	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!dsa_slave_dev_check(lower))
			continue;

		dp = dsa_slave_to_port(lower);
		if (!dp->lag_dev)
			/* Software LAG */
			continue;

		err = dsa_slave_changeupper(lower, info);
		if (notifier_to_errno(err))
			break;
	}

	return err;
}

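/* PRECHANGEUPPER handler for devices that are not DSA ports themselves:
 * refuse to enslave an 802.1Q upper of a DSA port into a bridge while
 * the port itself is part of a VLAN-aware bridge.
 */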
static int
dsa_prevent_bridging_8021q_upper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *ext_ack;
	struct net_device *slave;
	struct dsa_port *dp;

	ext_ack = netdev_notifier_info_to_extack(&info->info);

	if (!is_vlan_dev(dev))
		return NOTIFY_DONE;

	slave = vlan_dev_real_dev(dev);
	if (!dsa_slave_dev_check(slave))
		return NOTIFY_DONE;

	dp = dsa_slave_to_port(slave);
	if (!dp->bridge_dev)
		return NOTIFY_DONE;

	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
	if (br_vlan_enabled(dp->bridge_dev) &&
	    netif_is_bridge_master(info->upper_dev) && info->linking) {
		NL_SET_ERR_MSG_MOD(ext_ack,
				   "Cannot enslave VLAN device into VLAN aware bridge");
		return notifier_from_errno(-EINVAL);
	}

	return NOTIFY_DONE;
}

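/* Refuse creating an 802.1Q upper whose VID is already configured on the
 * VLAN-aware bridge this port is a member of; the bridge owns that VLAN.
 */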
static int
dsa_slave_check_8021q_upper(struct net_device *dev,
			    struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct net_device *br = dp->bridge_dev;
	struct bridge_vlan_info br_info;
	struct netlink_ext_ack *extack;
	int err;
	u16 vid;

	if (!br || !br_vlan_enabled(br))
		return NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);
	vid = vlan_dev_vlan_id(info->upper_dev);

	/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
	 * device, respectively the VID, is not found. A return
	 * value of 0 means the VID exists on the bridge, which is
	 * a failure for us here.
	 */
	err = br_vlan_get_info(br, vid, &br_info);
	if (err == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "This VLAN is already configured by the bridge");
		return notifier_from_errno(-EBUSY);
	}

	return NOTIFY_DONE;
}

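/* Main netdevice notifier: validate uppers at PRECHANGEUPPER time, react
 * to CHANGEUPPER on slaves and LAG masters, propagate LAG lower state
 * changes, and close all user ports when their DSA master goes down.
 */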
static int dsa_slave_netdevice_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_PRECHANGEUPPER: {
		struct netdev_notifier_changeupper_info *info = ptr;
		struct dsa_switch *ds;
		struct dsa_port *dp;
		int err;

		if (!dsa_slave_dev_check(dev))
			return dsa_prevent_bridging_8021q_upper(dev, ptr);

		dp = dsa_slave_to_port(dev);
		ds = dp->ds;

		if (ds->ops->port_prechangeupper) {
			err = ds->ops->port_prechangeupper(ds, dp->index, info);
			if (err)
				return notifier_from_errno(err);
		}

		if (is_vlan_dev(info->upper_dev))
			return dsa_slave_check_8021q_upper(dev, ptr);
		break;
	}
	case NETDEV_CHANGEUPPER:
		if (dsa_slave_dev_check(dev))
			return dsa_slave_changeupper(dev, ptr);

		if (netif_is_lag_master(dev))
			return dsa_slave_lag_changeupper(dev, ptr);

		break;
	case NETDEV_CHANGELOWERSTATE: {
		struct netdev_notifier_changelowerstate_info *info = ptr;
		struct dsa_port *dp;
		int err;

		if (!dsa_slave_dev_check(dev))
			break;

		dp = dsa_slave_to_port(dev);

		err = dsa_port_lag_change(dp, info->lower_state_info);
		return notifier_from_errno(err);
	}
	case NETDEV_GOING_DOWN: {
		struct dsa_port *dp, *cpu_dp;
		struct dsa_switch_tree *dst;
		LIST_HEAD(close_list);

		if (!netdev_uses_dsa(dev))
			return NOTIFY_DONE;

		cpu_dp = dev->dsa_ptr;
		dst = cpu_dp->ds->dst;

		list_for_each_entry(dp, &dst->ports, list) {
			if (!dsa_is_user_port(dp->ds, dp->index))
				continue;

			list_add(&dp->slave->close_list, &close_list);
		}

		dev_close_many(&close_list, true);

		return NOTIFY_OK;
	}
	default:
		break;
	}

	return NOTIFY_DONE;
}

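/* Notify switchdev that an FDB entry has been installed in hardware, so
 * the bridge marks it as offloaded. Only user ports emit this
 * notification.
 */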
static void
dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
{
	struct dsa_switch *ds = switchdev_work->ds;
	struct switchdev_notifier_fdb_info info;
	struct dsa_port *dp;

	if (!dsa_is_user_port(ds, switchdev_work->port))
		return;

	info.addr = switchdev_work->addr;
	info.vid = switchdev_work->vid;
	info.offloaded = true;
	dp = dsa_to_port(ds, switchdev_work->port);
	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
				 dp->slave, &info.info, NULL);
}

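/* Deferred FDB work: the switchdev notifier runs in atomic context,
 * while the drivers' FDB operations may sleep, so the hardware is
 * programmed here, in process context under the rtnl lock.
 */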
static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
	struct dsa_switchdev_event_work *switchdev_work =
		container_of(work, struct dsa_switchdev_event_work, work);
	struct dsa_switch *ds = switchdev_work->ds;
	struct dsa_port *dp;
	int err;

	dp = dsa_to_port(ds, switchdev_work->port);

	rtnl_lock();
	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		err = dsa_port_fdb_add(dp, switchdev_work->addr,
				       switchdev_work->vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, switchdev_work->addr,
				switchdev_work->vid, err);
			break;
		}
		dsa_fdb_offload_notify(switchdev_work);
		break;

	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		err = dsa_port_fdb_del(dp, switchdev_work->addr,
				       switchdev_work->vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, switchdev_work->addr,
				switchdev_work->vid, err);
		}

		break;
	}
	rtnl_unlock();

	kfree(switchdev_work);
	if (dsa_is_user_port(ds, dp->index))
		dev_put(dp->slave);
}

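/* Find the first DSA slave below a given device (such as a bridge), used
 * to locate the switch tree that a foreign interface is bridged with.
 */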
static int dsa_lower_dev_walk(struct net_device *lower_dev,
			      struct netdev_nested_priv *priv)
{
	if (dsa_slave_dev_check(lower_dev)) {
		priv->data = (void *)netdev_priv(lower_dev);
		return 1;
	}

	return 0;
}

static struct dsa_slave_priv *dsa_slave_dev_lower_find(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	netdev_walk_all_lower_dev_rcu(dev, dsa_lower_dev_walk, &priv);

	return (struct dsa_slave_priv *)priv.data;
}

/* Called under rcu_read_lock(). FDB events are deferred to a work item
 * because this notifier chain runs in atomic context, while programming
 * the hardware may sleep.
 */
static int dsa_slave_switchdev_event(struct notifier_block *unused,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	const struct switchdev_notifier_fdb_info *fdb_info;
	struct dsa_switchdev_event_work *switchdev_work;
	struct dsa_port *dp;
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_slave_dev_check,
						     dsa_slave_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = ptr;

		if (dsa_slave_dev_check(dev)) {
			if (!fdb_info->added_by_user)
				return NOTIFY_OK;

			dp = dsa_slave_to_port(dev);
		} else {
			/* Snoop addresses learnt on foreign interfaces
			 * bridged with us, for switches that don't
			 * automatically learn SA from CPU-injected traffic
			 */
			struct net_device *br_dev;
			struct dsa_slave_priv *p;

			br_dev = netdev_master_upper_dev_get_rcu(dev);
			if (!br_dev)
				return NOTIFY_DONE;

			if (!netif_is_bridge_master(br_dev))
				return NOTIFY_DONE;

			p = dsa_slave_dev_lower_find(br_dev);
			if (!p)
				return NOTIFY_DONE;

			dp = p->dp->cpu_dp;

			if (!dp->ds->assisted_learning_on_cpu_port)
				return NOTIFY_DONE;

			/* When the bridge learns an address on an offloaded
			 * LAG we don't want to send traffic to the CPU; the
			 * other ports bridged with the LAG should be able to
			 * autonomously forward towards it.
			 */
			if (dsa_tree_offloads_bridge_port(dp->ds->dst, dev))
				return NOTIFY_DONE;
		}

		if (!dp->ds->ops->port_fdb_add || !dp->ds->ops->port_fdb_del)
			return NOTIFY_DONE;

		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		INIT_WORK(&switchdev_work->work,
			  dsa_slave_switchdev_event_work);
		switchdev_work->ds = dp->ds;
		switchdev_work->port = dp->index;
		switchdev_work->event = event;

		ether_addr_copy(switchdev_work->addr,
				fdb_info->addr);
		switchdev_work->vid = fdb_info->vid;

		/* Hold a reference on the slave for dsa_fdb_offload_notify */
		if (dsa_is_user_port(dp->ds, dp->index))
			dev_hold(dev);
		dsa_schedule_work(&switchdev_work->work);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

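/* Blocking switchdev notifier: dispatch port object add/del and port
 * attribute set requests to the DSA port, letting the switchdev helpers
 * walk any stacked devices on top of our slaves.
 */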
static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
					      unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    dsa_slave_dev_check,
						    dsa_slave_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    dsa_slave_dev_check,
						    dsa_slave_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_slave_dev_check,
						     dsa_slave_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

static struct notifier_block dsa_slave_nb __read_mostly = {
	.notifier_call  = dsa_slave_netdevice_event,
};

static struct notifier_block dsa_slave_switchdev_notifier = {
	.notifier_call = dsa_slave_switchdev_event,
};

static struct notifier_block dsa_slave_switchdev_blocking_notifier = {
	.notifier_call = dsa_slave_switchdev_blocking_event,
};

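/* Register the netdevice and switchdev notifiers through which DSA
 * tracks bridge, LAG, VLAN and FDB events; unwind in reverse order on
 * failure.
 */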
int dsa_slave_register_notifier(void)
{
	struct notifier_block *nb;
	int err;

	err = register_netdevice_notifier(&dsa_slave_nb);
	if (err)
		return err;

	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		goto err_switchdev_nb;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err)
		goto err_switchdev_blocking_nb;

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
err_switchdev_nb:
	unregister_netdevice_notifier(&dsa_slave_nb);
	return err;
}

void dsa_slave_unregister_notifier(void)
{
	struct notifier_block *nb;
	int err;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = unregister_switchdev_blocking_notifier(nb);
	if (err)
		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);

	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dsa_slave_nb);
	if (err)
		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
}