xref: /openbmc/linux/drivers/staging/most/net/net.c (revision 068ac0db)
// SPDX-License-Identifier: GPL-2.0
/*
 * net.c - Networking component for Mostcore
 *
 * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/kobject.h>
#include "most/core.h"

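/*
 * Framing constants for the two packet formats carried over the asynchronous
 * MOST channel: MEP frames prepend an 8-byte header to a complete Ethernet
 * frame, while MDP frames carry an unsegmented MAMAC telegram behind a
 * 16-byte header (limiting the payload to MAMAC_DATA_LEN). The PMS_* values
 * describe the port message header fields written by skb_to_mep() and
 * skb_to_mamac() and checked on reception by PMS_IS_MEP()/pms_is_mamac().
 */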
#define MEP_HDR_LEN 8
#define MDP_HDR_LEN 16
#define MAMAC_DATA_LEN (1024 - MDP_HDR_LEN)

#define PMHL 5

#define PMS_TELID_UNSEGM_MAMAC	0x0A
#define PMS_FIFONO_MDP		0x01
#define PMS_FIFONO_MEP		0x04
#define PMS_MSGTYPE_DATA	0x04
#define PMS_DEF_PRIO		0
#define MEP_DEF_RETRY		15

#define PMS_FIFONO_MASK		0x07
#define PMS_FIFONO_SHIFT	3
#define PMS_RETRY_SHIFT		4
#define PMS_TELID_MASK		0x0F
#define PMS_TELID_SHIFT		4

#define HB(value)		((u8)((u16)(value) >> 8))
#define LB(value)		((u8)(value))

#define EXTRACT_BIT_SET(bitset_name, value) \
	(((value) >> bitset_name##_SHIFT) & bitset_name##_MASK)

#define PMS_IS_MEP(buf, len) \
	((len) > MEP_HDR_LEN && \
	 EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MEP)

static inline bool pms_is_mamac(char *buf, u32 len)
{
	return (len > MDP_HDR_LEN &&
		EXTRACT_BIT_SET(PMS_FIFONO, buf[3]) == PMS_FIFONO_MDP &&
		EXTRACT_BIT_SET(PMS_TELID, buf[14]) == PMS_TELID_UNSEGM_MAMAC);
}

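/*
 * Per-interface state: each MOST interface gets one net_device plus the
 * RX and TX channel links that comp_probe_channel() fills in.
 */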
struct net_dev_channel {
	bool linked;
	int ch_id;
};

struct net_dev_context {
	struct most_interface *iface;
	bool is_mamac;
	struct net_device *dev;
	struct net_dev_channel rx;
	struct net_dev_channel tx;
	struct list_head list;
};

static struct list_head net_devices = LIST_HEAD_INIT(net_devices);
/* serializes channel probe/disconnect against most_nd_open(); guards ch->linked = true */
static struct mutex probe_disc_mt;
/* guards the net_devices list, ch->linked = false and dev_hold() in get_net_dev_hold() */
static DEFINE_SPINLOCK(list_lock);
static struct core_component comp;

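/*
 * Convert an outgoing Ethernet frame into an MDP packet carrying an
 * unsegmented MAMAC telegram: a 16-byte header followed by the Ethernet
 * payload. The 16-bit MOST destination address is taken from the last two
 * bytes of the Ethernet destination MAC; an all-ones (broadcast) destination
 * is mapped to 0x03FF.
 */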
static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
{
	u8 *buff = mbo->virt_address;
	static const u8 broadcast[] = { 0x03, 0xFF };
	const u8 *dest_addr = skb->data + 4;
	const u8 *eth_type = skb->data + 12;
	unsigned int payload_len = skb->len - ETH_HLEN;
	unsigned int mdp_len = payload_len + MDP_HDR_LEN;

	if (mbo->buffer_length < mdp_len) {
		pr_err("drop: too small buffer! (%d for %d)\n",
		       mbo->buffer_length, mdp_len);
		return -EINVAL;
	}

	if (skb->len < ETH_HLEN) {
		pr_err("drop: too small packet! (%d)\n", skb->len);
		return -EINVAL;
	}

	if (dest_addr[0] == 0xFF && dest_addr[1] == 0xFF)
		dest_addr = broadcast;

	*buff++ = HB(mdp_len - 2);
	*buff++ = LB(mdp_len - 2);

	*buff++ = PMHL;
	*buff++ = (PMS_FIFONO_MDP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
	*buff++ = PMS_DEF_PRIO;
	*buff++ = dest_addr[0];
	*buff++ = dest_addr[1];
	*buff++ = 0x00;

	*buff++ = HB(payload_len + 6);
	*buff++ = LB(payload_len + 6);

	/* end of FPH here */

	*buff++ = eth_type[0];
	*buff++ = eth_type[1];
	*buff++ = 0;
	*buff++ = 0;

	*buff++ = (PMS_TELID_UNSEGM_MAMAC << PMS_TELID_SHIFT) | HB(payload_len);
	*buff++ = LB(payload_len);

	memcpy(buff, skb->data + ETH_HLEN, payload_len);
	mbo->buffer_length = mdp_len;
	return 0;
}

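/*
 * Convert an outgoing Ethernet frame into a MEP packet: an 8-byte header
 * (length, header length, FIFO number, retry/priority) followed by the
 * complete, unmodified Ethernet frame.
 */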
static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo)
{
	u8 *buff = mbo->virt_address;
	unsigned int mep_len = skb->len + MEP_HDR_LEN;

	if (mbo->buffer_length < mep_len) {
		pr_err("drop: too small buffer! (%d for %d)\n",
		       mbo->buffer_length, mep_len);
		return -EINVAL;
	}

	*buff++ = HB(mep_len - 2);
	*buff++ = LB(mep_len - 2);

	*buff++ = PMHL;
	*buff++ = (PMS_FIFONO_MEP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
	*buff++ = (MEP_DEF_RETRY << PMS_RETRY_SHIFT) | PMS_DEF_PRIO;
	*buff++ = 0;
	*buff++ = 0;
	*buff++ = 0;

	memcpy(buff, skb->data, skb->len);
	mbo->buffer_length = mep_len;
	return 0;
}

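/*
 * A MAC address whose first four bytes are zero selects MAMAC framing (the
 * address effectively is a 16-bit MOST node address); anything else selects
 * MEP framing. The default MTU follows the chosen format.
 */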
static int most_nd_set_mac_address(struct net_device *dev, void *p)
{
	struct net_dev_context *nd = netdev_priv(dev);
	int err = eth_mac_addr(dev, p);

	if (err)
		return err;

	nd->is_mamac =
		(dev->dev_addr[0] == 0 && dev->dev_addr[1] == 0 &&
		 dev->dev_addr[2] == 0 && dev->dev_addr[3] == 0);

	/*
	 * Set default MTU for the given packet type.
	 * It is still possible to change MTU using ip tools afterwards.
	 */
	dev->mtu = nd->is_mamac ? MAMAC_DATA_LEN : ETH_DATA_LEN;

	return 0;
}

static void on_netinfo(struct most_interface *iface,
		       unsigned char link_stat, unsigned char *mac_addr);

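/*
 * ndo_open: start both MOST channels, keep the carrier off until the HDM
 * reports the link state via on_netinfo(), and stay dormant until a valid
 * MAC address is known.
 */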
static int most_nd_open(struct net_device *dev)
{
	struct net_dev_context *nd = netdev_priv(dev);
	int ret = 0;

	mutex_lock(&probe_disc_mt);

	if (most_start_channel(nd->iface, nd->rx.ch_id, &comp)) {
		netdev_err(dev, "most_start_channel() failed\n");
		ret = -EBUSY;
		goto unlock;
	}

	if (most_start_channel(nd->iface, nd->tx.ch_id, &comp)) {
		netdev_err(dev, "most_start_channel() failed\n");
		most_stop_channel(nd->iface, nd->rx.ch_id, &comp);
		ret = -EBUSY;
		goto unlock;
	}

	netif_carrier_off(dev);
	if (is_valid_ether_addr(dev->dev_addr))
		netif_dormant_off(dev);
	else
		netif_dormant_on(dev);
	netif_wake_queue(dev);
	if (nd->iface->request_netinfo)
		nd->iface->request_netinfo(nd->iface, nd->tx.ch_id, on_netinfo);

unlock:
	mutex_unlock(&probe_disc_mt);
	return ret;
}

static int most_nd_stop(struct net_device *dev)
{
	struct net_dev_context *nd = netdev_priv(dev);

	netif_stop_queue(dev);
	if (nd->iface->request_netinfo)
		nd->iface->request_netinfo(nd->iface, nd->tx.ch_id, NULL);
	most_stop_channel(nd->iface, nd->rx.ch_id, &comp);
	most_stop_channel(nd->iface, nd->tx.ch_id, &comp);

	return 0;
}

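/*
 * ndo_start_xmit: fetch a buffer (MBO) from the TX channel, convert the skb
 * into MEP or MAMAC framing and submit it. If no MBO is available, the queue
 * is stopped and NETDEV_TX_BUSY is returned; comp_resume_tx_channel() wakes
 * the queue again once the interface completes a buffer.
 */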
static netdev_tx_t most_nd_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net_dev_context *nd = netdev_priv(dev);
	struct mbo *mbo;
	int ret;

	mbo = most_get_mbo(nd->iface, nd->tx.ch_id, &comp);

	if (!mbo) {
		netif_stop_queue(dev);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	if (nd->is_mamac)
		ret = skb_to_mamac(skb, mbo);
	else
		ret = skb_to_mep(skb, mbo);

	if (ret) {
		most_put_mbo(mbo);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	most_submit_mbo(mbo);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops most_nd_ops = {
	.ndo_open = most_nd_open,
	.ndo_stop = most_nd_stop,
	.ndo_start_xmit = most_nd_start_xmit,
	.ndo_set_mac_address = most_nd_set_mac_address,
};

static void most_nd_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &most_nd_ops;
}

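/* must be called with list_lock or probe_disc_mt held */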
static struct net_dev_context *get_net_dev(struct most_interface *iface)
{
	struct net_dev_context *nd;

	list_for_each_entry(nd, &net_devices, list)
		if (nd->iface == iface)
			return nd;
	return NULL;
}

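/*
 * Look up the context for an interface and, if both channels are linked,
 * take a reference on its net_device. The caller drops it with dev_put().
 */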
static struct net_dev_context *get_net_dev_hold(struct most_interface *iface)
{
	struct net_dev_context *nd;
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	nd = get_net_dev(iface);
	if (nd && nd->rx.linked && nd->tx.linked)
		dev_hold(nd->dev);
	else
		nd = NULL;
	spin_unlock_irqrestore(&list_lock, flags);
	return nd;
}

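/*
 * Core callback for every channel linked to this component. Only
 * asynchronous channels are accepted; the first linked channel allocates
 * the net_device ("meth%d"), the second one registers it.
 */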
static int comp_probe_channel(struct most_interface *iface, int channel_idx,
			      struct most_channel_config *ccfg, char *name,
			      char *args)
{
	struct net_dev_context *nd;
	struct net_dev_channel *ch;
	struct net_device *dev;
	unsigned long flags;
	int ret = 0;

	if (!iface)
		return -EINVAL;

	if (ccfg->data_type != MOST_CH_ASYNC)
		return -EINVAL;

	mutex_lock(&probe_disc_mt);
	nd = get_net_dev(iface);
	if (!nd) {
		dev = alloc_netdev(sizeof(struct net_dev_context), "meth%d",
				   NET_NAME_UNKNOWN, most_nd_setup);
		if (!dev) {
			ret = -ENOMEM;
			goto unlock;
		}

		nd = netdev_priv(dev);
		nd->iface = iface;
		nd->dev = dev;

		spin_lock_irqsave(&list_lock, flags);
		list_add(&nd->list, &net_devices);
		spin_unlock_irqrestore(&list_lock, flags);

		ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
	} else {
		ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
		if (ch->linked) {
			pr_err("direction is already in use\n");
			ret = -EINVAL;
			goto unlock;
		}

		if (register_netdev(nd->dev)) {
			pr_err("register_netdev() failed\n");
			ret = -EINVAL;
			goto unlock;
		}
	}
	ch->ch_id = channel_idx;
	ch->linked = true;

unlock:
	mutex_unlock(&probe_disc_mt);
	return ret;
}

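/*
 * Core callback when a channel is unlinked: the first disconnect (while both
 * directions are still linked) unregisters the net_device, the second one
 * removes the context from the list and frees it.
 */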
static int comp_disconnect_channel(struct most_interface *iface,
				   int channel_idx)
{
	struct net_dev_context *nd;
	struct net_dev_channel *ch;
	unsigned long flags;
	int ret = 0;

	mutex_lock(&probe_disc_mt);
	nd = get_net_dev(iface);
	if (!nd) {
		ret = -EINVAL;
		goto unlock;
	}

	if (nd->rx.linked && channel_idx == nd->rx.ch_id) {
		ch = &nd->rx;
	} else if (nd->tx.linked && channel_idx == nd->tx.ch_id) {
		ch = &nd->tx;
	} else {
		ret = -EINVAL;
		goto unlock;
	}

	if (nd->rx.linked && nd->tx.linked) {
		spin_lock_irqsave(&list_lock, flags);
		ch->linked = false;
		spin_unlock_irqrestore(&list_lock, flags);

		/*
		 * do not call most_stop_channel() here, because channels are
		 * going to be closed in ndo_stop() after unregister_netdev()
		 */
		unregister_netdev(nd->dev);
	} else {
		spin_lock_irqsave(&list_lock, flags);
		list_del(&nd->list);
		spin_unlock_irqrestore(&list_lock, flags);

		free_netdev(nd->dev);
	}

unlock:
	mutex_unlock(&probe_disc_mt);
	return ret;
}

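/* tx_completion callback: a TX buffer was returned, so the queue may run again */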
static int comp_resume_tx_channel(struct most_interface *iface,
				  int channel_idx)
{
	struct net_dev_context *nd;

	nd = get_net_dev_hold(iface);
	if (!nd)
		return 0;

	if (nd->tx.ch_id != channel_idx)
		goto put_nd;

	netif_wake_queue(nd->dev);

put_nd:
	dev_put(nd->dev);
	return 0;
}

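/*
 * rx_completion callback: validate the MEP or MDP/MAMAC framing, build an
 * skb (synthesizing an Ethernet header in the MAMAC case), strip the MOST
 * header and hand the frame to netif_rx(). Frames that do not belong to this
 * device are rejected with -EIO and the MBO is not consumed here.
 */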
static int comp_rx_data(struct mbo *mbo)
{
	const u32 zero = 0;
	struct net_dev_context *nd;
	char *buf = mbo->virt_address;
	u32 len = mbo->processed_length;
	struct sk_buff *skb;
	struct net_device *dev;
	unsigned int skb_len;
	int ret = 0;

	nd = get_net_dev_hold(mbo->ifp);
	if (!nd)
		return -EIO;

	if (nd->rx.ch_id != mbo->hdm_channel_id) {
		ret = -EIO;
		goto put_nd;
	}

	dev = nd->dev;

	if (nd->is_mamac) {
		if (!pms_is_mamac(buf, len)) {
			ret = -EIO;
			goto put_nd;
		}

		skb = dev_alloc_skb(len - MDP_HDR_LEN + 2 * ETH_ALEN + 2);
	} else {
		if (!PMS_IS_MEP(buf, len)) {
			ret = -EIO;
			goto put_nd;
		}

		skb = dev_alloc_skb(len - MEP_HDR_LEN);
	}

	if (!skb) {
		dev->stats.rx_dropped++;
		pr_err_once("drop packet: no memory for skb\n");
		goto out;
	}

	skb->dev = dev;

	if (nd->is_mamac) {
		/* destination: the device's own MAC address */
		ether_addr_copy(skb_put(skb, ETH_ALEN), dev->dev_addr);

		/* source: four zero bytes + 16-bit node address from the MDP header */
		skb_put_data(skb, &zero, 4);
		skb_put_data(skb, buf + 5, 2);

		/* EtherType as carried in the MDP header */
		skb_put_data(skb, buf + 10, 2);

		buf += MDP_HDR_LEN;
		len -= MDP_HDR_LEN;
	} else {
		buf += MEP_HDR_LEN;
		len -= MEP_HDR_LEN;
	}

	skb_put_data(skb, buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	skb_len = skb->len;
	if (netif_rx(skb) == NET_RX_SUCCESS) {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb_len;
	} else {
		dev->stats.rx_dropped++;
	}

out:
	most_put_mbo(mbo);

put_nd:
	dev_put(nd->dev);
	return ret;
}

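/* callbacks registered with the MOST core */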
static struct core_component comp = {
	.mod = THIS_MODULE,
	.name = "net",
	.probe_channel = comp_probe_channel,
	.disconnect_channel = comp_disconnect_channel,
	.tx_completion = comp_resume_tx_channel,
	.rx_completion = comp_rx_data,
};

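/* register the component and its configfs subsystem with the MOST core */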
static int __init most_net_init(void)
{
	int err;

	mutex_init(&probe_disc_mt);
	err = most_register_component(&comp);
	if (err)
		return err;
	err = most_register_configfs_subsys(&comp);
	if (err) {
		most_deregister_component(&comp);
		return err;
	}
	return 0;
}

static void __exit most_net_exit(void)
{
	most_deregister_configfs_subsys(&comp);
	most_deregister_component(&comp);
}

/**
 * on_netinfo - callback registered with the HDM to receive link status
 *              and the hardware's MAC address
 * @iface: MOST interface instance
 * @link_stat: link status
 * @mac_addr: MAC address delivered by the hardware
 */
static void on_netinfo(struct most_interface *iface,
		       unsigned char link_stat, unsigned char *mac_addr)
{
	struct net_dev_context *nd;
	struct net_device *dev;
	const u8 *m = mac_addr;

	nd = get_net_dev_hold(iface);
	if (!nd)
		return;

	dev = nd->dev;

	if (link_stat)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);

	if (m && is_valid_ether_addr(m)) {
		if (!is_valid_ether_addr(dev->dev_addr)) {
			netdev_info(dev, "set mac %02x-%02x-%02x-%02x-%02x-%02x\n",
				    m[0], m[1], m[2], m[3], m[4], m[5]);
			ether_addr_copy(dev->dev_addr, m);
			netif_dormant_off(dev);
		} else if (!ether_addr_equal(dev->dev_addr, m)) {
			netdev_warn(dev, "reject mac %02x-%02x-%02x-%02x-%02x-%02x\n",
				    m[0], m[1], m[2], m[3], m[4], m[5]);
		}
	}

	dev_put(nd->dev);
}

module_init(most_net_init);
module_exit(most_net_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
MODULE_DESCRIPTION("Networking Component Module for Mostcore");