xref: /openbmc/linux/drivers/net/mhi_net.c (revision 215c44fa)
17ffa7542SLoic Poulain // SPDX-License-Identifier: GPL-2.0-or-later
27ffa7542SLoic Poulain /* MHI Network driver - Network over MHI bus
37ffa7542SLoic Poulain  *
47ffa7542SLoic Poulain  * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
57ffa7542SLoic Poulain  */
67ffa7542SLoic Poulain 
77ffa7542SLoic Poulain #include <linux/if_arp.h>
87ffa7542SLoic Poulain #include <linux/mhi.h>
97ffa7542SLoic Poulain #include <linux/mod_devicetable.h>
107ffa7542SLoic Poulain #include <linux/module.h>
117ffa7542SLoic Poulain #include <linux/netdevice.h>
127ffa7542SLoic Poulain #include <linux/skbuff.h>
137ffa7542SLoic Poulain #include <linux/u64_stats_sync.h>
147ffa7542SLoic Poulain 
157ffa7542SLoic Poulain #define MHI_NET_MIN_MTU		ETH_MIN_MTU
167ffa7542SLoic Poulain #define MHI_NET_MAX_MTU		0xffff
177ffa7542SLoic Poulain #define MHI_NET_DEFAULT_MTU	0x4000
187ffa7542SLoic Poulain 
/* Per-device packet/byte/error counters. RX and TX counters form two
 * independent groups, each protected by its own u64_stats_sync so writers
 * (the MHI transfer callbacks) and readers (ndo_get_stats64) get coherent
 * 64-bit values without a lock.
 */
struct mhi_net_stats {
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t rx_errors;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	u64_stats_t tx_errors;
	u64_stats_t tx_dropped;
	struct u64_stats_sync tx_syncp;		/* guards the tx_* counters */
	struct u64_stats_sync rx_syncp;		/* guards the rx_* counters */
};
307ffa7542SLoic Poulain 
/* Driver private state, stored in netdev_priv() of the net_device. */
struct mhi_net_dev {
	struct mhi_device *mdev;		/* underlying MHI channel device */
	struct net_device *ndev;		/* back-pointer to our netdev */
	struct sk_buff *skbagg_head;		/* head of in-progress RX re-aggregation chain (NULL if none) */
	struct sk_buff *skbagg_tail;		/* last fragment appended to the chain */
	struct delayed_work rx_refill;		/* worker that queues RX buffers to the DL channel */
	struct mhi_net_stats stats;
	u32 rx_queue_sz;			/* total DL descriptors, sampled at link creation */
	int msg_enable;
	unsigned int mru;			/* RX buffer size override; 0 means use MTU */
};
427ffa7542SLoic Poulain 
/* Per-match-entry data: the netdev name template for this MHI channel. */
struct mhi_device_info {
	const char *netname;
};
467ffa7542SLoic Poulain 
mhi_ndo_open(struct net_device * ndev)477ffa7542SLoic Poulain static int mhi_ndo_open(struct net_device *ndev)
487ffa7542SLoic Poulain {
497ffa7542SLoic Poulain 	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
507ffa7542SLoic Poulain 
517ffa7542SLoic Poulain 	/* Feed the rx buffer pool */
527ffa7542SLoic Poulain 	schedule_delayed_work(&mhi_netdev->rx_refill, 0);
537ffa7542SLoic Poulain 
547ffa7542SLoic Poulain 	/* Carrier is established via out-of-band channel (e.g. qmi) */
557ffa7542SLoic Poulain 	netif_carrier_on(ndev);
567ffa7542SLoic Poulain 
577ffa7542SLoic Poulain 	netif_start_queue(ndev);
587ffa7542SLoic Poulain 
597ffa7542SLoic Poulain 	return 0;
607ffa7542SLoic Poulain }
617ffa7542SLoic Poulain 
mhi_ndo_stop(struct net_device * ndev)627ffa7542SLoic Poulain static int mhi_ndo_stop(struct net_device *ndev)
637ffa7542SLoic Poulain {
647ffa7542SLoic Poulain 	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
657ffa7542SLoic Poulain 
667ffa7542SLoic Poulain 	netif_stop_queue(ndev);
677ffa7542SLoic Poulain 	netif_carrier_off(ndev);
687ffa7542SLoic Poulain 	cancel_delayed_work_sync(&mhi_netdev->rx_refill);
697ffa7542SLoic Poulain 
707ffa7542SLoic Poulain 	return 0;
717ffa7542SLoic Poulain }
727ffa7542SLoic Poulain 
mhi_ndo_xmit(struct sk_buff * skb,struct net_device * ndev)737ffa7542SLoic Poulain static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
747ffa7542SLoic Poulain {
757ffa7542SLoic Poulain 	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
767ffa7542SLoic Poulain 	struct mhi_device *mdev = mhi_netdev->mdev;
777ffa7542SLoic Poulain 	int err;
787ffa7542SLoic Poulain 
797ffa7542SLoic Poulain 	err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
807ffa7542SLoic Poulain 	if (unlikely(err)) {
817ffa7542SLoic Poulain 		net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
827ffa7542SLoic Poulain 				    ndev->name, err);
837ffa7542SLoic Poulain 		dev_kfree_skb_any(skb);
847ffa7542SLoic Poulain 		goto exit_drop;
857ffa7542SLoic Poulain 	}
867ffa7542SLoic Poulain 
877ffa7542SLoic Poulain 	if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
887ffa7542SLoic Poulain 		netif_stop_queue(ndev);
897ffa7542SLoic Poulain 
907ffa7542SLoic Poulain 	return NETDEV_TX_OK;
917ffa7542SLoic Poulain 
927ffa7542SLoic Poulain exit_drop:
937ffa7542SLoic Poulain 	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
947ffa7542SLoic Poulain 	u64_stats_inc(&mhi_netdev->stats.tx_dropped);
957ffa7542SLoic Poulain 	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
967ffa7542SLoic Poulain 
977ffa7542SLoic Poulain 	return NETDEV_TX_OK;
987ffa7542SLoic Poulain }
997ffa7542SLoic Poulain 
mhi_ndo_get_stats64(struct net_device * ndev,struct rtnl_link_stats64 * stats)1007ffa7542SLoic Poulain static void mhi_ndo_get_stats64(struct net_device *ndev,
1017ffa7542SLoic Poulain 				struct rtnl_link_stats64 *stats)
1027ffa7542SLoic Poulain {
1037ffa7542SLoic Poulain 	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
1047ffa7542SLoic Poulain 	unsigned int start;
1057ffa7542SLoic Poulain 
1067ffa7542SLoic Poulain 	do {
107068c38adSThomas Gleixner 		start = u64_stats_fetch_begin(&mhi_netdev->stats.rx_syncp);
1087ffa7542SLoic Poulain 		stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
1097ffa7542SLoic Poulain 		stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
1107ffa7542SLoic Poulain 		stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
111068c38adSThomas Gleixner 	} while (u64_stats_fetch_retry(&mhi_netdev->stats.rx_syncp, start));
1127ffa7542SLoic Poulain 
1137ffa7542SLoic Poulain 	do {
114068c38adSThomas Gleixner 		start = u64_stats_fetch_begin(&mhi_netdev->stats.tx_syncp);
1157ffa7542SLoic Poulain 		stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
1167ffa7542SLoic Poulain 		stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
1177ffa7542SLoic Poulain 		stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
1187ffa7542SLoic Poulain 		stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
119068c38adSThomas Gleixner 	} while (u64_stats_fetch_retry(&mhi_netdev->stats.tx_syncp, start));
1207ffa7542SLoic Poulain }
1217ffa7542SLoic Poulain 
/* Netdev callbacks implemented by this driver */
static const struct net_device_ops mhi_netdev_ops = {
	.ndo_open               = mhi_ndo_open,
	.ndo_stop               = mhi_ndo_stop,
	.ndo_start_xmit         = mhi_ndo_xmit,
	.ndo_get_stats64	= mhi_ndo_get_stats64,
};
1287ffa7542SLoic Poulain 
mhi_net_setup(struct net_device * ndev)1297ffa7542SLoic Poulain static void mhi_net_setup(struct net_device *ndev)
1307ffa7542SLoic Poulain {
1317ffa7542SLoic Poulain 	ndev->header_ops = NULL;  /* No header */
1327ffa7542SLoic Poulain 	ndev->type = ARPHRD_RAWIP;
1337ffa7542SLoic Poulain 	ndev->hard_header_len = 0;
1347ffa7542SLoic Poulain 	ndev->addr_len = 0;
1357ffa7542SLoic Poulain 	ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
1367ffa7542SLoic Poulain 	ndev->netdev_ops = &mhi_netdev_ops;
1377ffa7542SLoic Poulain 	ndev->mtu = MHI_NET_DEFAULT_MTU;
1387ffa7542SLoic Poulain 	ndev->min_mtu = MHI_NET_MIN_MTU;
1397ffa7542SLoic Poulain 	ndev->max_mtu = MHI_NET_MAX_MTU;
1407ffa7542SLoic Poulain 	ndev->tx_queue_len = 1000;
1417ffa7542SLoic Poulain }
1427ffa7542SLoic Poulain 
mhi_net_skb_agg(struct mhi_net_dev * mhi_netdev,struct sk_buff * skb)1437ffa7542SLoic Poulain static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev,
1447ffa7542SLoic Poulain 				       struct sk_buff *skb)
1457ffa7542SLoic Poulain {
1467ffa7542SLoic Poulain 	struct sk_buff *head = mhi_netdev->skbagg_head;
1477ffa7542SLoic Poulain 	struct sk_buff *tail = mhi_netdev->skbagg_tail;
1487ffa7542SLoic Poulain 
1497ffa7542SLoic Poulain 	/* This is non-paged skb chaining using frag_list */
1507ffa7542SLoic Poulain 	if (!head) {
1517ffa7542SLoic Poulain 		mhi_netdev->skbagg_head = skb;
1527ffa7542SLoic Poulain 		return skb;
1537ffa7542SLoic Poulain 	}
1547ffa7542SLoic Poulain 
1557ffa7542SLoic Poulain 	if (!skb_shinfo(head)->frag_list)
1567ffa7542SLoic Poulain 		skb_shinfo(head)->frag_list = skb;
1577ffa7542SLoic Poulain 	else
1587ffa7542SLoic Poulain 		tail->next = skb;
1597ffa7542SLoic Poulain 
1607ffa7542SLoic Poulain 	head->len += skb->len;
1617ffa7542SLoic Poulain 	head->data_len += skb->len;
1627ffa7542SLoic Poulain 	head->truesize += skb->truesize;
1637ffa7542SLoic Poulain 
1647ffa7542SLoic Poulain 	mhi_netdev->skbagg_tail = skb;
1657ffa7542SLoic Poulain 
1667ffa7542SLoic Poulain 	return mhi_netdev->skbagg_head;
1677ffa7542SLoic Poulain }
1687ffa7542SLoic Poulain 
/* Downlink (device -> host) transfer completion callback.
 *
 * Called by the MHI core for each completed RX buffer; mhi_res->buf_addr
 * is the skb queued in mhi_net_rx_refill_work(). Successful packets are
 * pushed up the stack; packets split across several MHI transfers
 * (-EOVERFLOW) are re-aggregated via a frag_list chain first.
 */
static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct sk_buff *skb = mhi_res->buf_addr;
	int free_desc_count;

	/* Snapshot the DL ring occupancy for the refill decision below */
	free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	if (unlikely(mhi_res->transaction_status)) {
		switch (mhi_res->transaction_status) {
		case -EOVERFLOW:
			/* Packet can not fit in one MHI buffer and has been
			 * split over multiple MHI transfers, do re-aggregation.
			 * That usually means the device side MTU is larger than
			 * the host side MTU/MRU. Since this is not optimal,
			 * print a warning (once).
			 */
			netdev_warn_once(mhi_netdev->ndev,
					 "Fragmented packets received, fix MTU?\n");
			skb_put(skb, mhi_res->bytes_xferd);
			mhi_net_skb_agg(mhi_netdev, skb);
			break;
		case -ENOTCONN:
			/* MHI layer stopping/resetting the DL channel */
			dev_kfree_skb_any(skb);
			return;
		default:
			/* Unknown error, simply drop */
			dev_kfree_skb_any(skb);
			u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
			u64_stats_inc(&mhi_netdev->stats.rx_errors);
			u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
		}
	} else {
		skb_put(skb, mhi_res->bytes_xferd);

		if (mhi_netdev->skbagg_head) {
			/* Aggregate the final fragment */
			skb = mhi_net_skb_agg(mhi_netdev, skb);
			mhi_netdev->skbagg_head = NULL;
		}

		/* No link-layer header: infer the protocol from the IP
		 * version nibble, anything else is treated as QMAP.
		 */
		switch (skb->data[0] & 0xf0) {
		case 0x40:
			skb->protocol = htons(ETH_P_IP);
			break;
		case 0x60:
			skb->protocol = htons(ETH_P_IPV6);
			break;
		default:
			skb->protocol = htons(ETH_P_MAP);
			break;
		}

		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_packets);
		u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
		__netif_rx(skb);
	}

	/* Refill if RX buffers queue becomes low */
	if (free_desc_count >= mhi_netdev->rx_queue_sz / 2)
		schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}
2357ffa7542SLoic Poulain 
/* Uplink (host -> device) transfer completion callback.
 *
 * Called by the MHI core once the device has consumed a TX buffer that
 * mhi_ndo_xmit() queued. Frees the skb, updates TX statistics, and wakes
 * the TX queue if it was stopped due to a full UL ring.
 */
static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb = mhi_res->buf_addr;

	/* Hardware has consumed the buffer, so free the skb (which is not
	 * freed by the MHI stack) and perform accounting.
	 */
	dev_consume_skb_any(skb);

	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	if (unlikely(mhi_res->transaction_status)) {
		/* MHI layer stopping/resetting the UL channel */
		if (mhi_res->transaction_status == -ENOTCONN) {
			/* Close the stats window before the early return */
			u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
			return;
		}

		u64_stats_inc(&mhi_netdev->stats.tx_errors);
	} else {
		u64_stats_inc(&mhi_netdev->stats.tx_packets);
		u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
	}
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	/* Resume transmission if mhi_ndo_xmit() throttled the stack */
	if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_wake_queue(ndev);
}
2677ffa7542SLoic Poulain 
mhi_net_rx_refill_work(struct work_struct * work)2687ffa7542SLoic Poulain static void mhi_net_rx_refill_work(struct work_struct *work)
2697ffa7542SLoic Poulain {
2707ffa7542SLoic Poulain 	struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
2717ffa7542SLoic Poulain 						      rx_refill.work);
2727ffa7542SLoic Poulain 	struct net_device *ndev = mhi_netdev->ndev;
2737ffa7542SLoic Poulain 	struct mhi_device *mdev = mhi_netdev->mdev;
2747ffa7542SLoic Poulain 	struct sk_buff *skb;
2757ffa7542SLoic Poulain 	unsigned int size;
2767ffa7542SLoic Poulain 	int err;
2777ffa7542SLoic Poulain 
2787ffa7542SLoic Poulain 	size = mhi_netdev->mru ? mhi_netdev->mru : READ_ONCE(ndev->mtu);
2797ffa7542SLoic Poulain 
2807ffa7542SLoic Poulain 	while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
2817ffa7542SLoic Poulain 		skb = netdev_alloc_skb(ndev, size);
2827ffa7542SLoic Poulain 		if (unlikely(!skb))
2837ffa7542SLoic Poulain 			break;
2847ffa7542SLoic Poulain 
2857ffa7542SLoic Poulain 		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
2867ffa7542SLoic Poulain 		if (unlikely(err)) {
2877ffa7542SLoic Poulain 			net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
2887ffa7542SLoic Poulain 					    ndev->name, err);
2897ffa7542SLoic Poulain 			kfree_skb(skb);
2907ffa7542SLoic Poulain 			break;
2917ffa7542SLoic Poulain 		}
2927ffa7542SLoic Poulain 
2937ffa7542SLoic Poulain 		/* Do not hog the CPU if rx buffers are consumed faster than
2947ffa7542SLoic Poulain 		 * queued (unlikely).
2957ffa7542SLoic Poulain 		 */
2967ffa7542SLoic Poulain 		cond_resched();
2977ffa7542SLoic Poulain 	}
2987ffa7542SLoic Poulain 
2997ffa7542SLoic Poulain 	/* If we're still starved of rx buffers, reschedule later */
3007ffa7542SLoic Poulain 	if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz)
3017ffa7542SLoic Poulain 		schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
3027ffa7542SLoic Poulain }
3037ffa7542SLoic Poulain 
mhi_net_newlink(struct mhi_device * mhi_dev,struct net_device * ndev)3047ffa7542SLoic Poulain static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev)
3057ffa7542SLoic Poulain {
3067ffa7542SLoic Poulain 	struct mhi_net_dev *mhi_netdev;
3077ffa7542SLoic Poulain 	int err;
3087ffa7542SLoic Poulain 
3097ffa7542SLoic Poulain 	mhi_netdev = netdev_priv(ndev);
3107ffa7542SLoic Poulain 
3117ffa7542SLoic Poulain 	dev_set_drvdata(&mhi_dev->dev, mhi_netdev);
3127ffa7542SLoic Poulain 	mhi_netdev->ndev = ndev;
3137ffa7542SLoic Poulain 	mhi_netdev->mdev = mhi_dev;
3147ffa7542SLoic Poulain 	mhi_netdev->skbagg_head = NULL;
3157ffa7542SLoic Poulain 	mhi_netdev->mru = mhi_dev->mhi_cntrl->mru;
3167ffa7542SLoic Poulain 
3177ffa7542SLoic Poulain 	INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
3187ffa7542SLoic Poulain 	u64_stats_init(&mhi_netdev->stats.rx_syncp);
3197ffa7542SLoic Poulain 	u64_stats_init(&mhi_netdev->stats.tx_syncp);
3207ffa7542SLoic Poulain 
3217ffa7542SLoic Poulain 	/* Start MHI channels */
32297c78d0aSJakub Kicinski 	err = mhi_prepare_for_transfer(mhi_dev);
3237ffa7542SLoic Poulain 	if (err)
3244526fe74SDaniele Palmas 		return err;
3257ffa7542SLoic Poulain 
3267ffa7542SLoic Poulain 	/* Number of transfer descriptors determines size of the queue */
3277ffa7542SLoic Poulain 	mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
3287ffa7542SLoic Poulain 
3297ffa7542SLoic Poulain 	err = register_netdev(ndev);
3307ffa7542SLoic Poulain 	if (err)
3317ffa7542SLoic Poulain 		return err;
3327ffa7542SLoic Poulain 
3337ffa7542SLoic Poulain 	return 0;
3347ffa7542SLoic Poulain }
3357ffa7542SLoic Poulain 
/* Tear down the interface — reverse of mhi_net_newlink().
 * Ordering matters: unregister first so no new traffic can flow, then
 * stop the MHI channels, then release any leftover state.
 */
static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	unregister_netdev(ndev);

	mhi_unprepare_from_transfer(mhi_dev);

	/* Drop a pending (incomplete) RX aggregation chain, if any */
	kfree_skb(mhi_netdev->skbagg_head);

	free_netdev(ndev);

	dev_set_drvdata(&mhi_dev->dev, NULL);
}
3507ffa7542SLoic Poulain 
mhi_net_probe(struct mhi_device * mhi_dev,const struct mhi_device_id * id)3517ffa7542SLoic Poulain static int mhi_net_probe(struct mhi_device *mhi_dev,
3527ffa7542SLoic Poulain 			 const struct mhi_device_id *id)
3537ffa7542SLoic Poulain {
3547ffa7542SLoic Poulain 	const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data;
3557ffa7542SLoic Poulain 	struct net_device *ndev;
3567ffa7542SLoic Poulain 	int err;
3577ffa7542SLoic Poulain 
3587ffa7542SLoic Poulain 	ndev = alloc_netdev(sizeof(struct mhi_net_dev), info->netname,
3597ffa7542SLoic Poulain 			    NET_NAME_PREDICTABLE, mhi_net_setup);
3607ffa7542SLoic Poulain 	if (!ndev)
3617ffa7542SLoic Poulain 		return -ENOMEM;
3627ffa7542SLoic Poulain 
3637ffa7542SLoic Poulain 	SET_NETDEV_DEV(ndev, &mhi_dev->dev);
3647ffa7542SLoic Poulain 
3657ffa7542SLoic Poulain 	err = mhi_net_newlink(mhi_dev, ndev);
3667ffa7542SLoic Poulain 	if (err) {
3677ffa7542SLoic Poulain 		free_netdev(ndev);
3687ffa7542SLoic Poulain 		return err;
3697ffa7542SLoic Poulain 	}
3707ffa7542SLoic Poulain 
3717ffa7542SLoic Poulain 	return 0;
3727ffa7542SLoic Poulain }
3737ffa7542SLoic Poulain 
/* MHI bus remove: delegate full teardown to mhi_net_dellink(). */
static void mhi_net_remove(struct mhi_device *mhi_dev)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);

	mhi_net_dellink(mhi_dev, mhi_netdev->ndev);
}
3807ffa7542SLoic Poulain 
/* Name template for the hardware-accelerated IP channel */
static const struct mhi_device_info mhi_hwip0 = {
	.netname = "mhi_hwip%d",
};

/* Name template for the software IP channel */
static const struct mhi_device_info mhi_swip0 = {
	.netname = "mhi_swip%d",
};

static const struct mhi_device_id mhi_net_id_table[] = {
	/* Hardware accelerated data PATH (to modem IPA), protocol agnostic */
	{ .chan = "IP_HW0", .driver_data = (kernel_ulong_t)&mhi_hwip0 },
	/* Software data PATH (to modem CPU) */
	{ .chan = "IP_SW0", .driver_data = (kernel_ulong_t)&mhi_swip0 },
	{}
};
MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);

static struct mhi_driver mhi_net_driver = {
	.probe = mhi_net_probe,
	.remove = mhi_net_remove,
	.dl_xfer_cb = mhi_net_dl_callback,	/* RX completion */
	.ul_xfer_cb = mhi_net_ul_callback,	/* TX completion */
	.id_table = mhi_net_id_table,
	.driver = {
		.name = "mhi_net",
	},
};

module_mhi_driver(mhi_net_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Network over MHI");
MODULE_LICENSE("GPL v2");
414