// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip switch driver main logic
 *
 * Copyright (C) 2017-2019 Microchip Technology Inc.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/of_net.h>
#include <net/dsa.h>
#include <net/switchdev.h>

#include "ksz_priv.h"

void ksz_port_cleanup(struct ksz_device *dev, int port)
{
	/* Common code for port cleanup. */
	mutex_lock(&dev->dev_mutex);
	dev->on_ports &= ~(1 << port);
	dev->live_ports &= ~(1 << port);
	mutex_unlock(&dev->dev_mutex);
}
EXPORT_SYMBOL_GPL(ksz_port_cleanup);

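/* Propagate the current bridge membership mask to every other bridged port
 * that is in the forwarding state.
 */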
void ksz_update_port_member(struct ksz_device *dev, int port)
{
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->port_cnt; i++) {
		if (i == port || i == dev->cpu_port)
			continue;
		p = &dev->ports[i];
		if (!(dev->member & (1 << i)))
			continue;

		/* Port is a member of the bridge and is forwarding. */
		if (p->stp_state == BR_STATE_FORWARDING &&
		    p->member != dev->member)
			dev->dev_ops->cfg_port_member(dev, i, dev->member);
	}
}
EXPORT_SYMBOL_GPL(ksz_update_port_member);

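/* Read the remaining MIB counters of a port into its counter storage,
 * starting from mib->cnt_ptr, and reset the pointer when done.
 */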
static void port_r_cnt(struct ksz_device *dev, int port)
{
	struct ksz_port_mib *mib = &dev->ports[port].mib;
	u64 *dropped;

	/* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
	while (mib->cnt_ptr < dev->reg_mib_cnt) {
		dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
					&mib->counters[mib->cnt_ptr]);
		++mib->cnt_ptr;
	}

	/* The dropped counter is kept in the last slot of the storage. */
	dropped = &mib->counters[dev->mib_cnt];

	/* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
	while (mib->cnt_ptr < dev->mib_cnt) {
		dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
					dropped, &mib->counters[mib->cnt_ptr]);
		++mib->cnt_ptr;
	}
	mib->cnt_ptr = 0;
}

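/* Deferred work that walks all used ports and refreshes their MIB counters
 * under the per-port counter mutex.
 */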
static void ksz_mib_read_work(struct work_struct *work)
{
	struct ksz_device *dev = container_of(work, struct ksz_device,
					      mib_read);
	struct ksz_port_mib *mib;
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->mib_port_cnt; i++) {
		if (dsa_is_unused_port(dev->ds, i))
			continue;

		p = &dev->ports[i];
		mib = &p->mib;
		mutex_lock(&mib->cnt_mutex);

		/* Only read all MIB counters when the port is told to do so.
		 * Otherwise read only the dropped counters when the link is
		 * not up.
		 */
		if (!p->read) {
			const struct dsa_port *dp = dsa_to_port(dev->ds, i);

			if (!netif_carrier_ok(dp->slave))
				mib->cnt_ptr = dev->reg_mib_cnt;
		}
		port_r_cnt(dev, i);
		p->read = false;
		mutex_unlock(&mib->cnt_mutex);
	}
}

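/* Periodic timer that re-arms itself and schedules the MIB read work. */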
static void mib_monitor(struct timer_list *t)
{
	struct ksz_device *dev = from_timer(dev, t, mib_read_timer);

	mod_timer(&dev->mib_read_timer, jiffies + dev->mib_read_interval);
	schedule_work(&dev->mib_read);
}

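/* Initialize the per-port counters and start the periodic MIB read timer. */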
void ksz_init_mib_timer(struct ksz_device *dev)
{
	int i;

	/* Read MIB counters every 30 seconds to avoid overflow. */
	dev->mib_read_interval = msecs_to_jiffies(30000);

	INIT_WORK(&dev->mib_read, ksz_mib_read_work);
	timer_setup(&dev->mib_read_timer, mib_monitor, 0);

	for (i = 0; i < dev->mib_port_cnt; i++)
		dev->dev_ops->port_init_cnt(dev, i);

	/* Start the timer 2 seconds later. */
	dev->mib_read_timer.expires = jiffies + msecs_to_jiffies(2000);
	add_timer(&dev->mib_read_timer);
}
EXPORT_SYMBOL_GPL(ksz_init_mib_timer);

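/* DSA callback to read a 16-bit PHY register through the switch. */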
int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
	struct ksz_device *dev = ds->priv;
	u16 val = 0xffff;

	dev->dev_ops->r_phy(dev, addr, reg, &val);

	return val;
}
EXPORT_SYMBOL_GPL(ksz_phy_read16);

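/* DSA callback to write a 16-bit PHY register through the switch. */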
int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
	struct ksz_device *dev = ds->priv;

	dev->dev_ops->w_phy(dev, addr, reg, val);

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_phy_write16);

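/* DSA callback invoked on link changes; updates the live port mask and
 * snapshots the MIB counters when the link goes down.
 */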
void ksz_adjust_link(struct dsa_switch *ds, int port,
		     struct phy_device *phydev)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *p = &dev->ports[port];

	/* Read all MIB counters when the link is going down. */
	if (!phydev->link) {
		p->read = true;
		schedule_work(&dev->mib_read);
	}
	mutex_lock(&dev->dev_mutex);
	if (!phydev->link)
		dev->live_ports &= ~(1 << port);
	else
		/* Remember which port is connected and active. */
		dev->live_ports |= (1 << port) & dev->on_ports;
	mutex_unlock(&dev->dev_mutex);
}
EXPORT_SYMBOL_GPL(ksz_adjust_link);

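/* Return the number of ethtool statistics counters for a port. */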
int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct ksz_device *dev = ds->priv;

	if (sset != ETH_SS_STATS)
		return 0;

	return dev->mib_cnt;
}
EXPORT_SYMBOL_GPL(ksz_sset_count);

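/* Refresh the port MIB counters and copy them into the ethtool buffer. */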
void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);
	struct ksz_device *dev = ds->priv;
	struct ksz_port_mib *mib;

	mib = &dev->ports[port].mib;
	mutex_lock(&mib->cnt_mutex);

	/* Only read dropped counters if no link. */
	if (!netif_carrier_ok(dp->slave))
		mib->cnt_ptr = dev->reg_mib_cnt;
	port_r_cnt(dev, port);
	memcpy(buf, mib->counters, dev->mib_cnt * sizeof(u64));
	mutex_unlock(&mib->cnt_mutex);
}
EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);

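/* Mark the port as a bridge member; the hardware is reconfigured later from
 * port_stp_state_set().
 */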
int ksz_port_bridge_join(struct dsa_switch *ds, int port,
			 struct net_device *br)
{
	struct ksz_device *dev = ds->priv;

	mutex_lock(&dev->dev_mutex);
	dev->br_member |= (1 << port);
	mutex_unlock(&dev->dev_mutex);

	/* port_stp_state_set() will be called afterwards to put the port in
	 * the appropriate state, so there is no need to do anything here.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_bridge_join);

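/* Remove the port from the bridge member masks. */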
void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
			   struct net_device *br)
{
	struct ksz_device *dev = ds->priv;

	mutex_lock(&dev->dev_mutex);
	dev->br_member &= ~(1 << port);
	dev->member &= ~(1 << port);
	mutex_unlock(&dev->dev_mutex);

	/* port_stp_state_set() will be called afterwards to put the port in
	 * the forwarding state, so there is no need to do anything here.
	 */
}
EXPORT_SYMBOL_GPL(ksz_port_bridge_leave);

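/* Flush the dynamic MAC table entries learned on this port. */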
void ksz_port_fast_age(struct dsa_switch *ds, int port)
{
	struct ksz_device *dev = ds->priv;

	dev->dev_ops->flush_dyn_mac_table(dev, port);
}
EXPORT_SYMBOL_GPL(ksz_port_fast_age);

int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	/* nothing needed */

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_vlan_prepare);

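/* Walk the dynamic MAC table and report the entries that include this port
 * to the DSA dump callback.
 */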
int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
		      void *data)
{
	struct ksz_device *dev = ds->priv;
	int ret = 0;
	u16 i = 0;
	u16 entries = 0;
	u8 timestamp = 0;
	u8 fid;
	u8 member;
	struct alu_struct alu;

	do {
		alu.is_static = false;
		ret = dev->dev_ops->r_dyn_mac_table(dev, i, alu.mac, &fid,
						    &member, &timestamp,
						    &entries);
		if (!ret && (member & BIT(port))) {
			ret = cb(alu.mac, alu.fid, alu.is_static, data);
			if (ret)
				break;
		}
		i++;
	} while (i < entries);
	if (i >= entries)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);

int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb)
{
	/* nothing to do */
	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_prepare);

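/* Add the port to a matching static MAC table entry, or create a new entry
 * in the first empty slot.
 */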
void ksz_port_mdb_add(struct dsa_switch *ds, int port,
		      const struct switchdev_obj_port_mdb *mdb)
{
	struct ksz_device *dev = ds->priv;
	struct alu_struct alu;
	int index;
	int empty = 0;

	alu.port_forward = 0;
	for (index = 0; index < dev->num_statics; index++) {
		if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
			/* Found one already in static MAC table. */
			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
			    alu.fid == mdb->vid)
				break;
		/* Remember the first empty entry. */
		} else if (!empty) {
			empty = index + 1;
		}
	}

	/* No matching entry found and no empty entry available. */
	if (index == dev->num_statics && !empty)
		return;

	/* Add a new entry in the first empty slot. */
	if (index == dev->num_statics) {
		index = empty - 1;
		memset(&alu, 0, sizeof(alu));
		memcpy(alu.mac, mdb->addr, ETH_ALEN);
		alu.is_static = true;
	}
	alu.port_forward |= BIT(port);
	if (mdb->vid) {
		alu.is_use_fid = true;

		/* Need a way to map VID to FID. */
		alu.fid = mdb->vid;
	}
	dev->dev_ops->w_sta_mac_table(dev, index, &alu);
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_add);

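/* Remove the port from a matching static MAC table entry and invalidate the
 * entry when no ports remain.
 */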
int ksz_port_mdb_del(struct dsa_switch *ds, int port,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct ksz_device *dev = ds->priv;
	struct alu_struct alu;
	int index;
	int ret = 0;

	for (index = 0; index < dev->num_statics; index++) {
		if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
			/* Found one already in static MAC table. */
			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
			    alu.fid == mdb->vid)
				break;
		}
	}

	/* No matching entry found. */
	if (index == dev->num_statics)
		goto exit;

	/* Clear the port; invalidate the entry when no ports remain. */
	alu.port_forward &= ~BIT(port);
	if (!alu.port_forward)
		alu.is_static = false;
	dev->dev_ops->w_sta_mac_table(dev, index, &alu);

exit:
	return ret;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_del);

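/* DSA callback to bring up a user port and its PHY. */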
int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
	struct ksz_device *dev = ds->priv;

	/* Set up the slave port. */
	dev->dev_ops->port_setup(dev, port, false);
	dev->dev_ops->phy_setup(dev, port, phy);

	/* port_stp_state_set() will be called afterwards to enable the port,
	 * so there is no need to do anything here.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_enable_port);

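/* DSA callback to take a user port out of the active port masks. */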
void ksz_disable_port(struct dsa_switch *ds, int port)
{
	struct ksz_device *dev = ds->priv;

	dev->on_ports &= ~(1 << port);
	dev->live_ports &= ~(1 << port);

	/* port_stp_state_set() will be called afterwards to disable the port,
	 * so there is no need to do anything here.
	 */
}
EXPORT_SYMBOL_GPL(ksz_disable_port);

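/* Allocate the DSA switch and the common KSZ device structure. */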
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
{
	struct dsa_switch *ds;
	struct ksz_device *swdev;

	ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
	if (!ds)
		return NULL;

	swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
	if (!swdev)
		return NULL;

	ds->priv = swdev;
	swdev->dev = base;

	swdev->ds = ds;
	swdev->priv = priv;

	return swdev;
}
EXPORT_SYMBOL(ksz_switch_alloc);

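/* Common registration path: optional hardware reset, chip detection and
 * initialization, device tree parsing, then DSA registration.
 */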
int ksz_switch_register(struct ksz_device *dev,
			const struct ksz_dev_ops *ops)
{
	int ret;

	if (dev->pdata)
		dev->chip_id = dev->pdata->chip_id;

	dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset",
						  GPIOD_OUT_LOW);
	if (IS_ERR(dev->reset_gpio))
		return PTR_ERR(dev->reset_gpio);

	if (dev->reset_gpio) {
		gpiod_set_value_cansleep(dev->reset_gpio, 1);
		mdelay(10);
		gpiod_set_value_cansleep(dev->reset_gpio, 0);
	}

	mutex_init(&dev->dev_mutex);
	mutex_init(&dev->stats_mutex);
	mutex_init(&dev->alu_mutex);
	mutex_init(&dev->vlan_mutex);

	dev->dev_ops = ops;

	if (dev->dev_ops->detect(dev))
		return -EINVAL;

	ret = dev->dev_ops->init(dev);
	if (ret)
		return ret;

	/* The host port interface will be self-detected, or specifically set
	 * in the device tree.
	 */
	if (dev->dev->of_node) {
		ret = of_get_phy_mode(dev->dev->of_node);
		if (ret >= 0)
			dev->interface = ret;
		dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
							 "microchip,synclko-125");
	}

	ret = dsa_register_switch(dev->ds);
	if (ret) {
		dev->dev_ops->exit(dev);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(ksz_switch_register);

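/* Common teardown: stop MIB polling, let the chip driver clean up and
 * unregister from DSA, then assert reset if a GPIO is available.
 */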
void ksz_switch_remove(struct ksz_device *dev)
{
	/* Stop MIB polling if the timer was started. */
	if (dev->mib_read_timer.expires) {
		del_timer_sync(&dev->mib_read_timer);
		flush_work(&dev->mib_read);
	}

	dev->dev_ops->exit(dev);
	dsa_unregister_switch(dev->ds);

	if (dev->reset_gpio)
		gpiod_set_value_cansleep(dev->reset_gpio, 1);
}
EXPORT_SYMBOL(ksz_switch_remove);

MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver");
MODULE_LICENSE("GPL");