xref: /openbmc/linux/net/dsa/switch.c (revision 058bd857)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa_priv.h"

static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
						   unsigned int ageing_time)
{
	int i;

	for (i = 0; i < ds->num_ports; ++i) {
		struct dsa_port *dp = &ds->ports[i];

		if (dp->ageing_time && dp->ageing_time < ageing_time)
			ageing_time = dp->ageing_time;
	}

	return ageing_time;
}

static int dsa_switch_ageing_time(struct dsa_switch *ds,
				  struct dsa_notifier_ageing_time_info *info)
{
	unsigned int ageing_time = info->ageing_time;
	struct switchdev_trans *trans = info->trans;

	if (switchdev_trans_ph_prepare(trans)) {
		if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
			return -ERANGE;
		if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
			return -ERANGE;
		return 0;
	}

	/* Program the fastest ageing time in case of multiple bridges */
	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

	if (ds->ops->set_ageing_time)
		return ds->ops->set_ageing_time(ds, ageing_time);

	return 0;
}
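
/* The hardware ageing time is per switch, not per bridge, so the fastest
 * requirement wins. Example: with ports 1 and 2 in br0 (ageing time
 * 300000 ms) and port 3 in br1 (30000 ms), a request from br0 still ends up
 * programming 30000 ms.
 *
 * A minimal sketch of a driver hook consuming the result; the register name
 * FOO_ATU_AGE, the helper foo_write() and the 15 s register granularity are
 * made up for illustration only:
 *
 *	static int foo_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
 *	{
 *		return foo_write(ds->priv, FOO_ATU_AGE,
 *				 DIV_ROUND_UP(msecs, 15000));
 *	}
 */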

static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_bridge_join)
		return ds->ops->port_bridge_join(ds, info->port, info->br);

	if (ds->index != info->sw_index && ds->ops->crosschip_bridge_join)
		return ds->ops->crosschip_bridge_join(ds, info->sw_index,
						      info->port, info->br);

	return 0;
}

static int dsa_switch_bridge_leave(struct dsa_switch *ds,
				   struct dsa_notifier_bridge_info *info)
{
	bool unset_vlan_filtering = br_vlan_enabled(info->br);
	int err, i;

	if (ds->index == info->sw_index && ds->ops->port_bridge_leave)
		ds->ops->port_bridge_leave(ds, info->port, info->br);

	if (ds->index != info->sw_index && ds->ops->crosschip_bridge_leave)
		ds->ops->crosschip_bridge_leave(ds, info->sw_index, info->port,
						info->br);

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing the vlan_filtering setting upon slave ports
	 * leaving it. That is a good thing, because it lets us handle it
	 * ourselves and also handle the case where the switch's vlan_filtering
	 * setting is global (not per port). In that case, the correct moment
	 * to trigger the vlan_filtering callback is only when the last port
	 * leaves this bridge.
	 */
	if (unset_vlan_filtering && ds->vlan_filtering_is_global) {
		for (i = 0; i < ds->num_ports; i++) {
			if (i == info->port)
				continue;
			if (dsa_to_port(ds, i)->bridge_dev == info->br) {
				unset_vlan_filtering = false;
				break;
			}
		}
	}
	if (unset_vlan_filtering) {
		struct switchdev_trans trans = {0};

		err = dsa_port_vlan_filtering(&ds->ports[info->port],
					      false, &trans);
		if (err && err != -EOPNOTSUPP)
			return err;
	}
	return 0;
}
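
/* Example: on a chip where ds->vlan_filtering_is_global is set, if ports 1
 * and 2 are members of a vlan_filtering bridge and port 1 leaves, filtering
 * must stay enabled because port 2 still relies on it; only when the last
 * member port leaves is the chip-wide setting turned off again.
 */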

static int dsa_switch_fdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_add(ds, port, info->addr, info->vid);
}

static int dsa_switch_fdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_del(ds, port, info->addr, info->vid);
}
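
/* FDB entries are installed on every switch of the fabric, each time on the
 * port facing the originating user port. Example: in a two-switch fabric
 * where switch 1 reaches switch 0 through its DSA port 5, an FDB add for
 * port 2 of switch 0 is programmed on port 2 of switch 0 and on port 5 of
 * switch 1, so frames towards that address are steered across the
 * interconnect (this is what dsa_towards_port() resolves).
 */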

static int
dsa_switch_mdb_prepare_bitmap(struct dsa_switch *ds,
			      const struct switchdev_obj_port_mdb *mdb,
			      const unsigned long *bitmap)
{
	int port, err;

	if (!ds->ops->port_mdb_prepare || !ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	for_each_set_bit(port, bitmap, ds->num_ports) {
		err = ds->ops->port_mdb_prepare(ds, port, mdb);
		if (err)
			return err;
	}

	return 0;
}

static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
				      const struct switchdev_obj_port_mdb *mdb,
				      const unsigned long *bitmap)
{
	int port;

	if (!ds->ops->port_mdb_add)
		return;

	for_each_set_bit(port, bitmap, ds->num_ports)
		ds->ops->port_mdb_add(ds, port, mdb);
}

static int dsa_switch_mdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	const struct switchdev_obj_port_mdb *mdb = info->mdb;
	struct switchdev_trans *trans = info->trans;
	int port;

	/* Build a mask of Multicast group members */
	bitmap_zero(ds->bitmap, ds->num_ports);
	if (ds->index == info->sw_index)
		set_bit(info->port, ds->bitmap);
	for (port = 0; port < ds->num_ports; port++)
		if (dsa_is_dsa_port(ds, port))
			set_bit(port, ds->bitmap);

	if (switchdev_trans_ph_prepare(trans))
		return dsa_switch_mdb_prepare_bitmap(ds, mdb, ds->bitmap);

	dsa_switch_mdb_add_bitmap(ds, mdb, ds->bitmap);

	return 0;
}
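
/* The switchdev transaction is two-phase: in the prepare phase every port in
 * the member mask gets a chance to refuse the entry (e.g. a full multicast
 * table), and only the commit phase touches the hardware. Example: for a
 * group joined on port 3 of switch 0 in a two-switch fabric, switch 0's mask
 * holds port 3 plus its DSA ports, while switch 1's mask holds only its DSA
 * ports, so multicast traffic still flows across the fabric towards the
 * subscriber.
 */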

static int dsa_switch_mdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	const struct switchdev_obj_port_mdb *mdb = info->mdb;

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mdb_del(ds, info->port, mdb);

	return 0;
}

static int dsa_port_vlan_device_check(struct net_device *vlan_dev,
				      int vlan_dev_vid,
				      void *arg)
{
	struct switchdev_obj_port_vlan *vlan = arg;
	u16 vid;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		if (vid == vlan_dev_vid)
			return -EBUSY;
	}

	return 0;
}

static int dsa_port_vlan_check(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_vlan *vlan)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);
	int err = 0;

	/* Device is not bridged, let it proceed with the VLAN device
	 * creation.
	 */
	if (!dp->bridge_dev)
		return err;

	/* dsa_slave_vlan_rx_{add,kill}_vid() cannot use the prepare phase and
	 * already checks whether there is an overlapping bridge VLAN entry
	 * with the same VID, so here we only need to check that if we are
	 * adding a bridge VLAN entry there is not an overlapping VLAN device
	 * claiming that VID.
	 */
	return vlan_for_each(dp->slave, dsa_port_vlan_device_check,
			     (void *)vlan);
}
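
/* Example: if a VLAN device such as swp0.100 already sits on top of the
 * slave, an attempt to add a bridge VLAN entry spanning VIDs 90-110 on that
 * port is rejected with -EBUSY in the prepare phase, since VID 100 is
 * already claimed by the 8021q upper.
 */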

static int
dsa_switch_vlan_prepare_bitmap(struct dsa_switch *ds,
			       const struct switchdev_obj_port_vlan *vlan,
			       const unsigned long *bitmap)
{
	int port, err;

	if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	for_each_set_bit(port, bitmap, ds->num_ports) {
		err = dsa_port_vlan_check(ds, port, vlan);
		if (err)
			return err;

		err = ds->ops->port_vlan_prepare(ds, port, vlan);
		if (err)
			return err;
	}

	return 0;
}

static void
dsa_switch_vlan_add_bitmap(struct dsa_switch *ds,
			   const struct switchdev_obj_port_vlan *vlan,
			   const unsigned long *bitmap)
{
	int port;

	for_each_set_bit(port, bitmap, ds->num_ports)
		ds->ops->port_vlan_add(ds, port, vlan);
}

static int dsa_switch_vlan_add(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	const struct switchdev_obj_port_vlan *vlan = info->vlan;
	struct switchdev_trans *trans = info->trans;
	int port;

	/* Build a mask of VLAN members */
	bitmap_zero(ds->bitmap, ds->num_ports);
	if (ds->index == info->sw_index)
		set_bit(info->port, ds->bitmap);
	for (port = 0; port < ds->num_ports; port++)
		if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
			set_bit(port, ds->bitmap);

	if (switchdev_trans_ph_prepare(trans))
		return dsa_switch_vlan_prepare_bitmap(ds, vlan, ds->bitmap);

	dsa_switch_vlan_add_bitmap(ds, vlan, ds->bitmap);

	return 0;
}
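
/* Unlike the MDB case, the VLAN member mask also includes the CPU port, so
 * that tagged traffic in that VLAN keeps flowing to and from the host, as
 * well as the DSA interconnect ports, so the VLAN spans the whole fabric.
 * Example: "bridge vlan add vid 100 dev swp2" installs VID 100 on the target
 * port and on every CPU and DSA port of the tree.
 */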

static int dsa_switch_vlan_del(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	const struct switchdev_obj_port_vlan *vlan = info->vlan;

	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_vlan_del(ds, info->port, vlan);

	return 0;
}

static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	/* Non-switchdev operations cannot be rolled back. If a DSA driver
	 * returns an error during the chained call, switch chips may be in an
	 * inconsistent state.
	 */
	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}
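
/* Events are raised from the port layer and fan out to every switch in the
 * tree. A rough sketch of the calling side (the real helpers live in
 * net/dsa/port.c; the field and helper names below follow this file's usage):
 *
 *	struct dsa_notifier_bridge_info info = {
 *		.sw_index = dp->ds->index,
 *		.port = dp->index,
 *		.br = br,
 *	};
 *
 *	err = dsa_port_notify(dp, DSA_NOTIFIER_BRIDGE_JOIN, &info);
 *
 * Each switch then decides locally whether it is the target chip
 * (port_bridge_join) or a cross-chip neighbour (crosschip_bridge_join).
 */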

int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}

void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}
373