// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip switch driver main logic
 *
 * Copyright (C) 2017-2019 Microchip Technology Inc.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/of_net.h>
#include <net/dsa.h>
#include <net/switchdev.h>

#include "ksz_common.h"

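/* Propagate the software bridge membership (dev->member) to the hardware
 * port membership of every other bridged port that is in the forwarding
 * state.
 */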
void ksz_update_port_member(struct ksz_device *dev, int port)
{
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->port_cnt; i++) {
		if (i == port || i == dev->cpu_port)
			continue;
		p = &dev->ports[i];
		if (!(dev->member & (1 << i)))
			continue;

		/* Port is a member of the bridge and is forwarding. */
		if (p->stp_state == BR_STATE_FORWARDING &&
		    p->member != dev->member)
			dev->dev_ops->cfg_port_member(dev, i, dev->member);
	}
}
EXPORT_SYMBOL_GPL(ksz_update_port_member);

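/* Read the remaining MIB counters of a port into its software storage,
 * starting from mib->cnt_ptr, then reset the pointer for the next pass.
 */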
static void port_r_cnt(struct ksz_device *dev, int port)
{
	struct ksz_port_mib *mib = &dev->ports[port].mib;
	u64 *dropped;

	/* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
	while (mib->cnt_ptr < dev->reg_mib_cnt) {
		dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
					&mib->counters[mib->cnt_ptr]);
		++mib->cnt_ptr;
	}

	/* last one in storage */
	dropped = &mib->counters[dev->mib_cnt];

	/* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
	while (mib->cnt_ptr < dev->mib_cnt) {
		dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
					dropped, &mib->counters[mib->cnt_ptr]);
		++mib->cnt_ptr;
	}
	mib->cnt_ptr = 0;
}

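/* Delayed work that periodically refreshes the MIB counters of all used
 * ports and then reschedules itself.
 */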
static void ksz_mib_read_work(struct work_struct *work)
{
	struct ksz_device *dev = container_of(work, struct ksz_device,
					      mib_read.work);
	struct ksz_port_mib *mib;
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->port_cnt; i++) {
		if (dsa_is_unused_port(dev->ds, i))
			continue;

		p = &dev->ports[i];
		mib = &p->mib;
		mutex_lock(&mib->cnt_mutex);

		/* Only read the MIB counters when the port is told to do so.
		 * Otherwise read only the dropped counters when the link is
		 * down.
		 */
		if (!p->read) {
			const struct dsa_port *dp = dsa_to_port(dev->ds, i);

			if (!netif_carrier_ok(dp->slave))
				mib->cnt_ptr = dev->reg_mib_cnt;
		}
		port_r_cnt(dev, i);
		p->read = false;
		mutex_unlock(&mib->cnt_mutex);
	}

	schedule_delayed_work(&dev->mib_read, dev->mib_read_interval);
}

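/* Prepare the delayed work used for periodic MIB reads and let the chip
 * specific code initialize the counters of every port.  The work itself is
 * first scheduled from ksz_switch_register().
 */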
void ksz_init_mib_timer(struct ksz_device *dev)
{
	int i;

	INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);

	for (i = 0; i < dev->port_cnt; i++)
		dev->dev_ops->port_init_cnt(dev, i);
}
EXPORT_SYMBOL_GPL(ksz_init_mib_timer);

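/* DSA PHY access hooks: forward reads and writes to the chip specific
 * register helpers.
 */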
int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
	struct ksz_device *dev = ds->priv;
	u16 val = 0xffff;

	dev->dev_ops->r_phy(dev, addr, reg, &val);

	return val;
}
EXPORT_SYMBOL_GPL(ksz_phy_read16);

int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
	struct ksz_device *dev = ds->priv;

	dev->dev_ops->w_phy(dev, addr, reg, val);

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_phy_write16);

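/* Capture the final MIB counter values of a port when its link goes down. */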
void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
		       phy_interface_t interface)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *p = &dev->ports[port];

	/* Read all MIB counters when the link is going down. */
	p->read = true;
	/* Schedule an immediate read if the MIB timer has been started. */
	if (dev->mib_read_interval)
		schedule_delayed_work(&dev->mib_read, 0);
}
EXPORT_SYMBOL_GPL(ksz_mac_link_down);

int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct ksz_device *dev = ds->priv;

	if (sset != ETH_SS_STATS)
		return 0;

	return dev->mib_cnt;
}
EXPORT_SYMBOL_GPL(ksz_sset_count);

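/* Return the cached MIB counters of a port, refreshing them from hardware
 * first while holding the counter mutex.
 */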
void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);
	struct ksz_device *dev = ds->priv;
	struct ksz_port_mib *mib;

	mib = &dev->ports[port].mib;
	mutex_lock(&mib->cnt_mutex);

	/* Only read dropped counters if no link. */
	if (!netif_carrier_ok(dp->slave))
		mib->cnt_ptr = dev->reg_mib_cnt;
	port_r_cnt(dev, port);
	memcpy(buf, mib->counters, dev->mib_cnt * sizeof(u64));
	mutex_unlock(&mib->cnt_mutex);
}
EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);

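/* Track bridge membership in the br_member bitmap.  The hardware port
 * membership is only updated later from port_stp_state_set().
 */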
int ksz_port_bridge_join(struct dsa_switch *ds, int port,
			 struct net_device *br)
{
	struct ksz_device *dev = ds->priv;

	mutex_lock(&dev->dev_mutex);
	dev->br_member |= (1 << port);
	mutex_unlock(&dev->dev_mutex);

	/* port_stp_state_set() will be called afterwards to put the port in
	 * the appropriate state, so there is no need to do anything here.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_bridge_join);

void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
			   struct net_device *br)
{
	struct ksz_device *dev = ds->priv;

	mutex_lock(&dev->dev_mutex);
	dev->br_member &= ~(1 << port);
	dev->member &= ~(1 << port);
	mutex_unlock(&dev->dev_mutex);

	/* port_stp_state_set() will be called afterwards to put the port in
	 * the forwarding state, so there is no need to do anything here.
	 */
}
EXPORT_SYMBOL_GPL(ksz_port_bridge_leave);

void ksz_port_fast_age(struct dsa_switch *ds, int port)
{
	struct ksz_device *dev = ds->priv;

	dev->dev_ops->flush_dyn_mac_table(dev, port);
}
EXPORT_SYMBOL_GPL(ksz_port_fast_age);

int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	/* nothing needed */

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_vlan_prepare);

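/* Walk the dynamic MAC table and report every entry that forwards to this
 * port through the DSA dump callback.
 */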
int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
		      void *data)
{
	struct ksz_device *dev = ds->priv;
	int ret = 0;
	u16 i = 0;
	u16 entries = 0;
	u8 timestamp = 0;
	u8 fid;
	u8 member;
	struct alu_struct alu;

	do {
		alu.is_static = false;
		ret = dev->dev_ops->r_dyn_mac_table(dev, i, alu.mac, &fid,
						    &member, &timestamp,
						    &entries);
		if (!ret && (member & BIT(port))) {
			ret = cb(alu.mac, alu.fid, alu.is_static, data);
			if (ret)
				break;
		}
		i++;
	} while (i < entries);
	if (i >= entries)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);

int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb)
{
	/* nothing to do */
	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_prepare);

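/* Add a port to a multicast group in the static MAC table, reusing an
 * existing entry for the address/VID pair or taking the first free slot.
 */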
void ksz_port_mdb_add(struct dsa_switch *ds, int port,
		      const struct switchdev_obj_port_mdb *mdb)
{
	struct ksz_device *dev = ds->priv;
	struct alu_struct alu;
	int index;
	int empty = 0;

	alu.port_forward = 0;
	for (index = 0; index < dev->num_statics; index++) {
		if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
			/* Found one already in static MAC table. */
			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
			    alu.fid == mdb->vid)
				break;
		/* Remember the first empty entry. */
		} else if (!empty) {
			empty = index + 1;
		}
	}

	/* No matching entry and no empty entry available. */
	if (index == dev->num_statics && !empty)
		return;

	/* Use the first empty entry to add a new one. */
	if (index == dev->num_statics) {
		index = empty - 1;
		memset(&alu, 0, sizeof(alu));
		memcpy(alu.mac, mdb->addr, ETH_ALEN);
		alu.is_static = true;
	}
	alu.port_forward |= BIT(port);
	if (mdb->vid) {
		alu.is_use_fid = true;

		/* Need a way to map VID to FID. */
		alu.fid = mdb->vid;
	}
	dev->dev_ops->w_sta_mac_table(dev, index, &alu);
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_add);

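/* Remove a port from a multicast group in the static MAC table.  The entry
 * is invalidated once no ports are left in its forwarding map.
 */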
int ksz_port_mdb_del(struct dsa_switch *ds, int port,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct ksz_device *dev = ds->priv;
	struct alu_struct alu;
	int index;
	int ret = 0;

	for (index = 0; index < dev->num_statics; index++) {
		if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
			/* Found one already in static MAC table. */
			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
			    alu.fid == mdb->vid)
				break;
		}
	}

	/* No matching entry found. */
	if (index == dev->num_statics)
		goto exit;

	/* Clear the port from the entry and invalidate it when empty. */
	alu.port_forward &= ~BIT(port);
	if (!alu.port_forward)
		alu.is_static = false;
	dev->dev_ops->w_sta_mac_table(dev, index, &alu);

exit:
	return ret;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_del);

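/* Called when a user port is enabled; let the chip specific code set up the
 * port registers.
 */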
int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
	struct ksz_device *dev = ds->priv;

	if (!dsa_is_user_port(ds, port))
		return 0;

	/* Set up the slave port. */
	dev->dev_ops->port_setup(dev, port, false);

	/* port_stp_state_set() will be called afterwards to enable the port,
	 * so there is no need to do anything here.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_enable_port);

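/* Allocate the DSA switch and the driver private structure and tie them
 * together.  Returns NULL on allocation failure.
 */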
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
{
	struct dsa_switch *ds;
	struct ksz_device *swdev;

	ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	ds->dev = base;
	ds->num_ports = DSA_MAX_PORTS;

	swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
	if (!swdev)
		return NULL;

	ds->priv = swdev;
	swdev->dev = base;

	swdev->ds = ds;
	swdev->priv = priv;

	return swdev;
}
EXPORT_SYMBOL(ksz_switch_alloc);

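/* Common registration path: reset the chip through the optional reset GPIO,
 * detect and initialize it, parse the port PHY modes from the device tree,
 * register with the DSA core and start the periodic MIB reads.
 */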
int ksz_switch_register(struct ksz_device *dev,
			const struct ksz_dev_ops *ops)
{
	struct device_node *port, *ports;
	phy_interface_t interface;
	unsigned int port_num;
	int ret;

	if (dev->pdata)
		dev->chip_id = dev->pdata->chip_id;

	dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset",
						  GPIOD_OUT_LOW);
	if (IS_ERR(dev->reset_gpio))
		return PTR_ERR(dev->reset_gpio);

	if (dev->reset_gpio) {
		gpiod_set_value_cansleep(dev->reset_gpio, 1);
		usleep_range(10000, 12000);
		gpiod_set_value_cansleep(dev->reset_gpio, 0);
		usleep_range(100, 1000);
	}

	mutex_init(&dev->dev_mutex);
	mutex_init(&dev->regmap_mutex);
	mutex_init(&dev->alu_mutex);
	mutex_init(&dev->vlan_mutex);

	dev->dev_ops = ops;

	if (dev->dev_ops->detect(dev))
		return -EINVAL;

	ret = dev->dev_ops->init(dev);
	if (ret)
		return ret;

	/* The host port interface is either self-detected or specifically set
	 * in the device tree.
	 */
	for (port_num = 0; port_num < dev->port_cnt; ++port_num)
		dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA;
	if (dev->dev->of_node) {
		ret = of_get_phy_mode(dev->dev->of_node, &interface);
		if (ret == 0)
			dev->compat_interface = interface;
		ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports");
		if (!ports)
			ports = of_get_child_by_name(dev->dev->of_node, "ports");
		if (ports) {
			for_each_available_child_of_node(ports, port) {
				if (of_property_read_u32(port, "reg",
							 &port_num))
					continue;
				if (port_num >= dev->port_cnt) {
					/* Drop the node references taken by
					 * the iterator before bailing out.
					 */
					of_node_put(port);
					of_node_put(ports);
					return -EINVAL;
				}
				of_get_phy_mode(port,
						&dev->ports[port_num].interface);
			}
			of_node_put(ports);
		}
		dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
							 "microchip,synclko-125");
	}

	ret = dsa_register_switch(dev->ds);
	if (ret) {
		dev->dev_ops->exit(dev);
		return ret;
	}

	/* Read MIB counters every 30 seconds to avoid overflow. */
	dev->mib_read_interval = msecs_to_jiffies(30000);

	/* Start the MIB timer. */
	schedule_delayed_work(&dev->mib_read, 0);

	return 0;
}
EXPORT_SYMBOL(ksz_switch_register);

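/* Tear down the switch: stop the MIB timer, let the chip specific code clean
 * up, unregister from the DSA core and assert the reset GPIO if present.
 */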
void ksz_switch_remove(struct ksz_device *dev)
{
	/* Stop the MIB timer if it has been started. */
	if (dev->mib_read_interval)
		cancel_delayed_work_sync(&dev->mib_read);

	dev->dev_ops->exit(dev);
	dsa_unregister_switch(dev->ds);

	if (dev->reset_gpio)
		gpiod_set_value_cansleep(dev->reset_gpio, 1);
}
EXPORT_SYMBOL(ksz_switch_remove);

MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver");
MODULE_LICENSE("GPL");