// SPDX-License-Identifier: GPL-2.0-or-later
/* MHI Network driver - Network over MHI bus
 *
 * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
 */

#include <linux/if_arp.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>

#define MHI_NET_MIN_MTU		ETH_MIN_MTU
#define MHI_NET_MAX_MTU		0xffff
#define MHI_NET_DEFAULT_MTU	0x4000

struct mhi_net_stats {
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t rx_errors;
	u64_stats_t rx_dropped;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	u64_stats_t tx_errors;
	u64_stats_t tx_dropped;
	atomic_t rx_queued;
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
};

struct mhi_net_dev {
	struct mhi_device *mdev;
	struct net_device *ndev;
	struct delayed_work rx_refill;
	struct mhi_net_stats stats;
	u32 rx_queue_sz;
};

static int mhi_ndo_open(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	/* Feed the rx buffer pool */
	schedule_delayed_work(&mhi_netdev->rx_refill, 0);

	/* Carrier is established via out-of-band channel (e.g. qmi) */
	netif_carrier_on(ndev);

	netif_start_queue(ndev);

	return 0;
}

static int mhi_ndo_stop(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	cancel_delayed_work_sync(&mhi_netdev->rx_refill);

	return 0;
}

static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	struct mhi_device *mdev = mhi_netdev->mdev;
	int err;

	err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (unlikely(err)) {
		net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
				    ndev->name, err);

		u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
		u64_stats_inc(&mhi_netdev->stats.tx_dropped);
		u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

		/* drop the packet */
		dev_kfree_skb_any(skb);
	}

	if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}

static void mhi_ndo_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp);
		stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
		stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
		stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
		stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped);
	} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.tx_syncp);
		stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
		stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
		stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
		stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
	} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.tx_syncp, start));
}

static const struct net_device_ops mhi_netdev_ops = {
	.ndo_open = mhi_ndo_open,
	.ndo_stop = mhi_ndo_stop,
	.ndo_start_xmit = mhi_ndo_xmit,
	.ndo_get_stats64 = mhi_ndo_get_stats64,
};

static void mhi_net_setup(struct net_device *ndev)
{
	ndev->header_ops = NULL; /* No header */
	ndev->type = ARPHRD_RAWIP;
	ndev->hard_header_len = 0;
	ndev->addr_len = 0;
	ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
	ndev->netdev_ops = &mhi_netdev_ops;
	ndev->mtu = MHI_NET_DEFAULT_MTU;
	ndev->min_mtu = MHI_NET_MIN_MTU;
	ndev->max_mtu = MHI_NET_MAX_MTU;
	ndev->tx_queue_len = 1000;
}

static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct sk_buff *skb = mhi_res->buf_addr;
	int remaining;

	remaining = atomic_dec_return(&mhi_netdev->stats.rx_queued);

	if (unlikely(mhi_res->transaction_status)) {
		dev_kfree_skb_any(skb);

		/* MHI layer stopping/resetting the DL channel */
		if (mhi_res->transaction_status == -ENOTCONN)
			return;

		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_errors);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
	} else {
		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_packets);
		u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);

		switch (skb->data[0] & 0xf0) {
		case 0x40:
			skb->protocol = htons(ETH_P_IP);
			break;
		case 0x60:
			skb->protocol = htons(ETH_P_IPV6);
			break;
		default:
			skb->protocol = htons(ETH_P_MAP);
			break;
		}

		skb_put(skb, mhi_res->bytes_xferd);
		netif_rx(skb);
	}

	/* Refill if RX buffers queue becomes low */
	if (remaining <= mhi_netdev->rx_queue_sz / 2)
		schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}

static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb = mhi_res->buf_addr;

	/* Hardware has consumed the buffer, so free the skb (which is not
	 * freed by the MHI stack) and perform accounting.
	 */
	dev_consume_skb_any(skb);

	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	if (unlikely(mhi_res->transaction_status)) {

		/* MHI layer stopping/resetting the UL channel */
		if (mhi_res->transaction_status == -ENOTCONN) {
			u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
			return;
		}

		u64_stats_inc(&mhi_netdev->stats.tx_errors);
	} else {
		u64_stats_inc(&mhi_netdev->stats.tx_packets);
		u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
	}
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_wake_queue(ndev);
}

static void mhi_net_rx_refill_work(struct work_struct *work)
{
	struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
						      rx_refill.work);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	int size = READ_ONCE(ndev->mtu);
	struct sk_buff *skb;
	int err;

	while (atomic_read(&mhi_netdev->stats.rx_queued) < mhi_netdev->rx_queue_sz) {
		skb = netdev_alloc_skb(ndev, size);
		if (unlikely(!skb))
			break;

		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
		if (unlikely(err)) {
			net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
					    ndev->name, err);
			kfree_skb(skb);
			break;
		}

		atomic_inc(&mhi_netdev->stats.rx_queued);

		/* Do not hog the CPU if rx buffers are consumed faster than
		 * queued (unlikely).
		 */
		cond_resched();
	}

	/* If we're still starved of rx buffers, reschedule later */
	if (unlikely(!atomic_read(&mhi_netdev->stats.rx_queued)))
		schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}

static struct device_type wwan_type = {
	.name = "wwan",
};

static int mhi_net_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	const char *netname = (char *)id->driver_data;
	struct device *dev = &mhi_dev->dev;
	struct mhi_net_dev *mhi_netdev;
	struct net_device *ndev;
	int err;

	ndev = alloc_netdev(sizeof(*mhi_netdev), netname, NET_NAME_PREDICTABLE,
			    mhi_net_setup);
	if (!ndev)
		return -ENOMEM;

	mhi_netdev = netdev_priv(ndev);
	dev_set_drvdata(dev, mhi_netdev);
	mhi_netdev->ndev = ndev;
	mhi_netdev->mdev = mhi_dev;
	SET_NETDEV_DEV(ndev, &mhi_dev->dev);
	SET_NETDEV_DEVTYPE(ndev, &wwan_type);

	/* All MHI net channels have 128 ring elements (at least for now) */
	mhi_netdev->rx_queue_sz = 128;

	INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
	u64_stats_init(&mhi_netdev->stats.rx_syncp);
	u64_stats_init(&mhi_netdev->stats.tx_syncp);

	/* Start MHI channels */
	err = mhi_prepare_for_transfer(mhi_dev);
	if (err)
		goto out_err;

	err = register_netdev(ndev);
	if (err)
		goto out_err;

	return 0;

out_err:
	free_netdev(ndev);
	return err;
}

static void mhi_net_remove(struct mhi_device *mhi_dev)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);

	unregister_netdev(mhi_netdev->ndev);

	mhi_unprepare_from_transfer(mhi_netdev->mdev);

	free_netdev(mhi_netdev->ndev);
}

static const struct mhi_device_id mhi_net_id_table[] = {
	{ .chan = "IP_HW0", .driver_data = (kernel_ulong_t)"mhi_hwip%d" },
	{ .chan = "IP_SW0", .driver_data = (kernel_ulong_t)"mhi_swip%d" },
	{}
};
MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);

static struct mhi_driver mhi_net_driver = {
	.probe = mhi_net_probe,
	.remove = mhi_net_remove,
	.dl_xfer_cb = mhi_net_dl_callback,
	.ul_xfer_cb = mhi_net_ul_callback,
	.id_table = mhi_net_id_table,
	.driver = {
		.name = "mhi_net",
		.owner = THIS_MODULE,
	},
};

module_mhi_driver(mhi_net_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Network over MHI");
MODULE_LICENSE("GPL v2");