1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Microchip switch driver main logic
4  *
5  * Copyright (C) 2017-2019 Microchip Technology Inc.
6  */
7 
8 #include <linux/delay.h>
9 #include <linux/export.h>
10 #include <linux/gpio/consumer.h>
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/platform_data/microchip-ksz.h>
14 #include <linux/phy.h>
15 #include <linux/etherdevice.h>
16 #include <linux/if_bridge.h>
17 #include <linux/of_net.h>
18 #include <net/dsa.h>
19 #include <net/switchdev.h>
20 
21 #include "ksz_priv.h"
22 
23 void ksz_port_cleanup(struct ksz_device *dev, int port)
24 {
25 	/* Common code for port cleanup. */
26 	mutex_lock(&dev->dev_mutex);
27 	dev->on_ports &= ~(1 << port);
28 	dev->live_ports &= ~(1 << port);
29 	mutex_unlock(&dev->dev_mutex);
30 }
31 EXPORT_SYMBOL_GPL(ksz_port_cleanup);
32 
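/* Walk all ports except the requesting port and the CPU port and reprogram
 * any bridged, forwarding port whose cached membership no longer matches
 * the current membership mask in dev->member.
 */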
33 void ksz_update_port_member(struct ksz_device *dev, int port)
34 {
35 	struct ksz_port *p;
36 	int i;
37 
38 	for (i = 0; i < dev->port_cnt; i++) {
39 		if (i == port || i == dev->cpu_port)
40 			continue;
41 		p = &dev->ports[i];
42 		if (!(dev->member & (1 << i)))
43 			continue;
44 
45 		/* Port is a member of the bridge and is forwarding. */
46 		if (p->stp_state == BR_STATE_FORWARDING &&
47 		    p->member != dev->member)
48 			dev->dev_ops->cfg_port_member(dev, i, dev->member);
49 	}
50 }
51 EXPORT_SYMBOL_GPL(ksz_update_port_member);
52 
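/* Read the hardware MIB counters of one port into its software mirror.
 * Counters below dev->reg_mib_cnt are plain counters; the remaining ones
 * up to dev->mib_cnt are packet counters that also update the dropped
 * counter kept at the end of the array.
 */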
53 static void port_r_cnt(struct ksz_device *dev, int port)
54 {
55 	struct ksz_port_mib *mib = &dev->ports[port].mib;
56 	u64 *dropped;
57 
58 	/* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
59 	while (mib->cnt_ptr < dev->reg_mib_cnt) {
60 		dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
61 					&mib->counters[mib->cnt_ptr]);
62 		++mib->cnt_ptr;
63 	}
64 
	/* The dropped counter uses the last slot in the counter storage. */
66 	dropped = &mib->counters[dev->mib_cnt];
67 
68 	/* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
69 	while (mib->cnt_ptr < dev->mib_cnt) {
70 		dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
71 					dropped, &mib->counters[mib->cnt_ptr]);
72 		++mib->cnt_ptr;
73 	}
74 	mib->cnt_ptr = 0;
75 }
76 
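/* Deferred work that refreshes the MIB counters of all ports.  It is
 * scheduled periodically by mib_monitor() and directly when a link goes
 * down in ksz_adjust_link().
 */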
77 static void ksz_mib_read_work(struct work_struct *work)
78 {
79 	struct ksz_device *dev = container_of(work, struct ksz_device,
80 					      mib_read);
81 	struct ksz_port_mib *mib;
82 	struct ksz_port *p;
83 	int i;
84 
85 	for (i = 0; i < dev->mib_port_cnt; i++) {
86 		p = &dev->ports[i];
87 		mib = &p->mib;
88 		mutex_lock(&mib->cnt_mutex);
89 
		/* Only read the MIB counters when the port is told to do so.
		 * Otherwise read just the dropped counters when the link is
		 * down.
		 */
93 		if (!p->read) {
94 			const struct dsa_port *dp = dsa_to_port(dev->ds, i);
95 
96 			if (!netif_carrier_ok(dp->slave))
97 				mib->cnt_ptr = dev->reg_mib_cnt;
98 		}
99 		port_r_cnt(dev, i);
100 		p->read = false;
101 		mutex_unlock(&mib->cnt_mutex);
102 	}
103 }
104 
105 static void mib_monitor(struct timer_list *t)
106 {
107 	struct ksz_device *dev = from_timer(dev, t, mib_read_timer);
108 
109 	mod_timer(&dev->mib_read_timer, jiffies + dev->mib_read_interval);
110 	schedule_work(&dev->mib_read);
111 }
112 
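/* Initialize the per-port counters and start the periodic MIB read. */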
113 void ksz_init_mib_timer(struct ksz_device *dev)
114 {
115 	int i;
116 
117 	/* Read MIB counters every 30 seconds to avoid overflow. */
118 	dev->mib_read_interval = msecs_to_jiffies(30000);
119 
120 	INIT_WORK(&dev->mib_read, ksz_mib_read_work);
121 	timer_setup(&dev->mib_read_timer, mib_monitor, 0);
122 
123 	for (i = 0; i < dev->mib_port_cnt; i++)
124 		dev->dev_ops->port_init_cnt(dev, i);
125 
126 	/* Start the timer 2 seconds later. */
127 	dev->mib_read_timer.expires = jiffies + msecs_to_jiffies(2000);
128 	add_timer(&dev->mib_read_timer);
129 }
130 EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
131 
132 int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
133 {
134 	struct ksz_device *dev = ds->priv;
135 	u16 val = 0xffff;
136 
137 	dev->dev_ops->r_phy(dev, addr, reg, &val);
138 
139 	return val;
140 }
141 EXPORT_SYMBOL_GPL(ksz_phy_read16);
142 
143 int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
144 {
145 	struct ksz_device *dev = ds->priv;
146 
147 	dev->dev_ops->w_phy(dev, addr, reg, val);
148 
149 	return 0;
150 }
151 EXPORT_SYMBOL_GPL(ksz_phy_write16);
152 
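/* DSA adjust_link callback: track which ports have an active link and
 * capture the MIB counters of a port whose link is going down.
 */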
153 void ksz_adjust_link(struct dsa_switch *ds, int port,
154 		     struct phy_device *phydev)
155 {
156 	struct ksz_device *dev = ds->priv;
157 	struct ksz_port *p = &dev->ports[port];
158 
159 	/* Read all MIB counters when the link is going down. */
160 	if (!phydev->link) {
161 		p->read = true;
162 		schedule_work(&dev->mib_read);
163 	}
164 	mutex_lock(&dev->dev_mutex);
165 	if (!phydev->link)
166 		dev->live_ports &= ~(1 << port);
167 	else
168 		/* Remember which port is connected and active. */
169 		dev->live_ports |= (1 << port) & dev->on_ports;
170 	mutex_unlock(&dev->dev_mutex);
171 }
172 EXPORT_SYMBOL_GPL(ksz_adjust_link);
173 
174 int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
175 {
176 	struct ksz_device *dev = ds->priv;
177 
178 	if (sset != ETH_SS_STATS)
179 		return 0;
180 
181 	return dev->mib_cnt;
182 }
183 EXPORT_SYMBOL_GPL(ksz_sset_count);
184 
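/* DSA get_ethtool_stats callback: refresh the software MIB mirror of the
 * port and copy it to the ethtool buffer.
 */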
185 void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
186 {
187 	const struct dsa_port *dp = dsa_to_port(ds, port);
188 	struct ksz_device *dev = ds->priv;
189 	struct ksz_port_mib *mib;
190 
191 	mib = &dev->ports[port].mib;
192 	mutex_lock(&mib->cnt_mutex);
193 
	/* Read only the dropped counters when there is no link. */
195 	if (!netif_carrier_ok(dp->slave))
196 		mib->cnt_ptr = dev->reg_mib_cnt;
197 	port_r_cnt(dev, port);
198 	memcpy(buf, mib->counters, dev->mib_cnt * sizeof(u64));
199 	mutex_unlock(&mib->cnt_mutex);
200 }
201 EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);
202 
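/* DSA port_bridge_join callback: record the bridge membership of the port.
 * The hardware is reprogrammed later through port_stp_state_set().
 */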
203 int ksz_port_bridge_join(struct dsa_switch *ds, int port,
204 			 struct net_device *br)
205 {
206 	struct ksz_device *dev = ds->priv;
207 
208 	mutex_lock(&dev->dev_mutex);
209 	dev->br_member |= (1 << port);
210 	mutex_unlock(&dev->dev_mutex);
211 
	/* port_stp_state_set() will be called afterwards to put the port in
	 * the appropriate state, so there is nothing else to do here.
	 */
215 
216 	return 0;
217 }
218 EXPORT_SYMBOL_GPL(ksz_port_bridge_join);
219 
220 void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
221 			   struct net_device *br)
222 {
223 	struct ksz_device *dev = ds->priv;
224 
225 	mutex_lock(&dev->dev_mutex);
226 	dev->br_member &= ~(1 << port);
227 	dev->member &= ~(1 << port);
228 	mutex_unlock(&dev->dev_mutex);
229 
	/* port_stp_state_set() will be called afterwards to put the port in
	 * the forwarding state, so there is nothing else to do here.
	 */
233 }
234 EXPORT_SYMBOL_GPL(ksz_port_bridge_leave);
235 
236 void ksz_port_fast_age(struct dsa_switch *ds, int port)
237 {
238 	struct ksz_device *dev = ds->priv;
239 
240 	dev->dev_ops->flush_dyn_mac_table(dev, port);
241 }
242 EXPORT_SYMBOL_GPL(ksz_port_fast_age);
243 
244 int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
245 			  const struct switchdev_obj_port_vlan *vlan)
246 {
247 	/* nothing needed */
248 
249 	return 0;
250 }
251 EXPORT_SYMBOL_GPL(ksz_port_vlan_prepare);
252 
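/* DSA port_fdb_dump callback: walk the dynamic MAC table and report every
 * entry that includes this port.
 */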
253 int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
254 		      void *data)
255 {
256 	struct ksz_device *dev = ds->priv;
257 	int ret = 0;
258 	u16 i = 0;
259 	u16 entries = 0;
260 	u8 timestamp = 0;
261 	u8 fid;
262 	u8 member;
263 	struct alu_struct alu;
264 
	do {
		alu.is_static = false;
		ret = dev->dev_ops->r_dyn_mac_table(dev, i, alu.mac, &fid,
						    &member, &timestamp,
						    &entries);
		if (!ret && (member & BIT(port))) {
			/* Report the FID read from the table as the VID. */
			alu.fid = fid;
			ret = cb(alu.mac, alu.fid, alu.is_static, data);
272 			if (ret)
273 				break;
274 		}
275 		i++;
276 	} while (i < entries);
277 	if (i >= entries)
278 		ret = 0;
279 
280 	return ret;
281 }
282 EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);
283 
284 int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
285 			 const struct switchdev_obj_port_mdb *mdb)
286 {
287 	/* nothing to do */
288 	return 0;
289 }
290 EXPORT_SYMBOL_GPL(ksz_port_mdb_prepare);
291 
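/* DSA port_mdb_add callback: add the port to a matching static MAC table
 * entry, or claim the first empty entry when none matches.
 */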
292 void ksz_port_mdb_add(struct dsa_switch *ds, int port,
293 		      const struct switchdev_obj_port_mdb *mdb)
294 {
295 	struct ksz_device *dev = ds->priv;
296 	struct alu_struct alu;
297 	int index;
298 	int empty = 0;
299 
300 	alu.port_forward = 0;
301 	for (index = 0; index < dev->num_statics; index++) {
302 		if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
			/* Found a matching entry in the static MAC table. */
304 			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
305 			    alu.fid == mdb->vid)
306 				break;
307 		/* Remember the first empty entry. */
308 		} else if (!empty) {
309 			empty = index + 1;
310 		}
311 	}
312 
	/* No matching entry and no empty slot. */
314 	if (index == dev->num_statics && !empty)
315 		return;
316 
	/* Create a new entry in the first empty slot. */
318 	if (index == dev->num_statics) {
319 		index = empty - 1;
320 		memset(&alu, 0, sizeof(alu));
321 		memcpy(alu.mac, mdb->addr, ETH_ALEN);
322 		alu.is_static = true;
323 	}
324 	alu.port_forward |= BIT(port);
325 	if (mdb->vid) {
326 		alu.is_use_fid = true;
327 
328 		/* Need a way to map VID to FID. */
329 		alu.fid = mdb->vid;
330 	}
331 	dev->dev_ops->w_sta_mac_table(dev, index, &alu);
332 }
333 EXPORT_SYMBOL_GPL(ksz_port_mdb_add);
334 
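/* DSA port_mdb_del callback: remove the port from the matching static MAC
 * table entry and invalidate the entry when no port is left.
 */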
335 int ksz_port_mdb_del(struct dsa_switch *ds, int port,
336 		     const struct switchdev_obj_port_mdb *mdb)
337 {
338 	struct ksz_device *dev = ds->priv;
339 	struct alu_struct alu;
340 	int index;
341 	int ret = 0;
342 
343 	for (index = 0; index < dev->num_statics; index++) {
344 		if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
			/* Found a matching entry in the static MAC table. */
346 			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
347 			    alu.fid == mdb->vid)
348 				break;
349 		}
350 	}
351 
	/* no matching entry */
353 	if (index == dev->num_statics)
354 		goto exit;
355 
	/* Remove the port; invalidate the entry when no port is left. */
357 	alu.port_forward &= ~BIT(port);
358 	if (!alu.port_forward)
359 		alu.is_static = false;
360 	dev->dev_ops->w_sta_mac_table(dev, index, &alu);
361 
362 exit:
363 	return ret;
364 }
365 EXPORT_SYMBOL_GPL(ksz_port_mdb_del);
366 
367 int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
368 {
369 	struct ksz_device *dev = ds->priv;
370 
	/* Set up the slave port. */
372 	dev->dev_ops->port_setup(dev, port, false);
373 	dev->dev_ops->phy_setup(dev, port, phy);
374 
	/* port_stp_state_set() will be called afterwards to enable the port,
	 * so there is nothing else to do here.
	 */
378 
379 	return 0;
380 }
381 EXPORT_SYMBOL_GPL(ksz_enable_port);
382 
383 void ksz_disable_port(struct dsa_switch *ds, int port)
384 {
385 	struct ksz_device *dev = ds->priv;
386 
387 	dev->on_ports &= ~(1 << port);
388 	dev->live_ports &= ~(1 << port);
389 
	/* port_stp_state_set() will be called afterwards to disable the port,
	 * so there is nothing else to do here.
	 */
393 }
394 EXPORT_SYMBOL_GPL(ksz_disable_port);
395 
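/* Allocate the DSA switch and the common ksz_device state for a bus front
 * end.  "ops" provides the register access callbacks and "priv" the
 * bus-specific private data.
 */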
396 struct ksz_device *ksz_switch_alloc(struct device *base,
397 				    const struct ksz_io_ops *ops,
398 				    void *priv)
399 {
400 	struct dsa_switch *ds;
401 	struct ksz_device *swdev;
402 
403 	ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
404 	if (!ds)
405 		return NULL;
406 
407 	swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
408 	if (!swdev)
409 		return NULL;
410 
411 	ds->priv = swdev;
412 	swdev->dev = base;
413 
414 	swdev->ds = ds;
415 	swdev->priv = priv;
416 	swdev->ops = ops;
417 
418 	return swdev;
419 }
420 EXPORT_SYMBOL(ksz_switch_alloc);
421 
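/* Common registration path shared by the bus front ends: optional hardware
 * reset via the "reset" GPIO, chip detection, chip-specific init and,
 * finally, dsa_register_switch().
 *
 * Illustrative sketch only (not taken from this file): a bus front end such
 * as an SPI driver would typically pair ksz_switch_alloc() with
 * ksz_switch_register(), possibly through a chip-specific wrapper.  The
 * names "ksz_spi_ops" and "ksz9477_dev_ops" below stand in for the bus I/O
 * ops and chip ops provided elsewhere:
 *
 *	dev = ksz_switch_alloc(&spi->dev, &ksz_spi_ops, spi);
 *	if (!dev)
 *		return -ENOMEM;
 *	if (spi->dev.platform_data)
 *		dev->pdata = spi->dev.platform_data;
 *	ret = ksz_switch_register(dev, &ksz9477_dev_ops);
 *	if (ret)
 *		return ret;
 *	spi_set_drvdata(spi, dev);
 */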
422 int ksz_switch_register(struct ksz_device *dev,
423 			const struct ksz_dev_ops *ops)
424 {
425 	int ret;
426 
427 	if (dev->pdata)
428 		dev->chip_id = dev->pdata->chip_id;
429 
430 	dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset",
431 						  GPIOD_OUT_LOW);
432 	if (IS_ERR(dev->reset_gpio))
433 		return PTR_ERR(dev->reset_gpio);
434 
435 	if (dev->reset_gpio) {
436 		gpiod_set_value(dev->reset_gpio, 1);
437 		mdelay(10);
438 		gpiod_set_value(dev->reset_gpio, 0);
439 	}
440 
441 	mutex_init(&dev->dev_mutex);
442 	mutex_init(&dev->reg_mutex);
443 	mutex_init(&dev->stats_mutex);
444 	mutex_init(&dev->alu_mutex);
445 	mutex_init(&dev->vlan_mutex);
446 
447 	dev->dev_ops = ops;
448 
449 	if (dev->dev_ops->detect(dev))
450 		return -EINVAL;
451 
452 	ret = dev->dev_ops->init(dev);
453 	if (ret)
454 		return ret;
455 
	/* The host port interface is either auto-detected or explicitly set
	 * in the device tree.
	 */
459 	if (dev->dev->of_node) {
460 		ret = of_get_phy_mode(dev->dev->of_node);
461 		if (ret >= 0)
462 			dev->interface = ret;
463 	}
464 
465 	ret = dsa_register_switch(dev->ds);
466 	if (ret) {
467 		dev->dev_ops->exit(dev);
468 		return ret;
469 	}
470 
471 	return 0;
472 }
473 EXPORT_SYMBOL(ksz_switch_register);
474 
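/* Tear down the switch: stop the MIB timer and work, let the chip code
 * clean up, unregister from the DSA core and assert the reset line when
 * one is used.
 */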
475 void ksz_switch_remove(struct ksz_device *dev)
476 {
	/* Stop the timer and the deferred MIB read if the timer was started. */
478 	if (dev->mib_read_timer.expires) {
479 		del_timer_sync(&dev->mib_read_timer);
480 		flush_work(&dev->mib_read);
481 	}
482 
483 	dev->dev_ops->exit(dev);
484 	dsa_unregister_switch(dev->ds);
485 
486 	if (dev->reset_gpio)
		gpiod_set_value(dev->reset_gpio, 1);
}
490 EXPORT_SYMBOL(ksz_switch_remove);
491 
492 MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
493 MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver");
494 MODULE_LICENSE("GPL");
495