// SPDX-License-Identifier: GPL-2.0-or-later
/* MHI MBIM Network driver - Network/MBIM over MHI bus
 *
 * Copyright (C) 2021 Linaro Ltd <loic.poulain@linaro.org>
 *
 * This driver copies some code from cdc_ncm, which is:
 * Copyright (C) ST-Ericsson 2010-2012
 * and cdc_mbim, which is:
 * Copyright (c) 2012  Smith Micro Software, Inc.
 * Copyright (c) 2012  Bjørn Mork <bjorn@mork.no>
 *
 */

#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/mhi.h>
#include <linux/mii.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc_ncm.h>
#include <linux/wwan.h>

/* An MRU of 3500 optimizes skb allocation: the skbs basically fit in one 4K
 * page. Large MBIM packets will simply be split over several MHI transfers
 * and chained by the MHI net layer (zerocopy).
 */
#define MHI_DEFAULT_MRU 3500

#define MHI_MBIM_DEFAULT_MTU 1500
#define MHI_MAX_BUF_SZ 0xffff

#define MBIM_NDP16_SIGN_MASK 0x00ffffff

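/* Registered links (MBIM sessions) are kept in a small hash table keyed by
 * session ID and looked up under RCU in the RX path.
 */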
#define MHI_MBIM_LINK_HASH_SIZE 8
#define LINK_HASH(session) ((session) % MHI_MBIM_LINK_HASH_SIZE)

struct mhi_mbim_link {
	struct mhi_mbim_context *mbim;
	struct net_device *ndev;
	unsigned int session;

	/* stats */
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t rx_errors;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	u64_stats_t tx_errors;
	u64_stats_t tx_dropped;
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;

	struct hlist_node hlnode;
};

struct mhi_mbim_context {
	struct mhi_device *mdev;
	struct sk_buff *skbagg_head;
	struct sk_buff *skbagg_tail;
	unsigned int mru;
	u32 rx_queue_sz;
	u16 rx_seq;
	u16 tx_seq;
	struct delayed_work rx_refill;
	spinlock_t tx_lock;
	struct hlist_head link_list[MHI_MBIM_LINK_HASH_SIZE];
};

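/* Fixed MBIM framing prepended to each TX datagram: one NTH16 header and one
 * NDP16 with two datagram entries (the single IP packet plus a NULL terminator).
 */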
struct mbim_tx_hdr {
	struct usb_cdc_ncm_nth16 nth16;
	struct usb_cdc_ncm_ndp16 ndp16;
	struct usb_cdc_ncm_dpe16 dpe16[2];
} __packed;

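/* Find the link matching a MBIM session ID, must be called under RCU read lock */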
static struct mhi_mbim_link *mhi_mbim_get_link_rcu(struct mhi_mbim_context *mbim,
						   unsigned int session)
{
	struct mhi_mbim_link *link;

	hlist_for_each_entry_rcu(link, &mbim->link_list[LINK_HASH(session)], hlnode) {
		if (link->session == session)
			return link;
	}

	return NULL;
}

static struct sk_buff *mbim_tx_fixup(struct sk_buff *skb, unsigned int session,
				     u16 tx_seq)
{
	unsigned int dgram_size = skb->len;
	struct usb_cdc_ncm_nth16 *nth16;
	struct usb_cdc_ncm_ndp16 *ndp16;
	struct mbim_tx_hdr *mbim_hdr;

	/* Only one NDP is sent, containing the IP packet (no aggregation) */

	/* Ensure we have enough headroom for crafting MBIM header */
	if (skb_cow_head(skb, sizeof(struct mbim_tx_hdr))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	mbim_hdr = skb_push(skb, sizeof(struct mbim_tx_hdr));

	/* Fill NTB header */
	nth16 = &mbim_hdr->nth16;
	nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
	nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
	nth16->wSequence = cpu_to_le16(tx_seq);
	nth16->wBlockLength = cpu_to_le16(skb->len);
	nth16->wNdpIndex = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));

	/* Fill the unique NDP */
	ndp16 = &mbim_hdr->ndp16;
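	/* The MBIM session ID is encoded in the fourth byte of the "IPS" NDP16 signature */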
	ndp16->dwSignature = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN | (session << 24));
	ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16)
					+ sizeof(struct usb_cdc_ncm_dpe16) * 2);
	ndp16->wNextNdpIndex = 0;

	/* Datagram follows the mbim header */
	ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(sizeof(struct mbim_tx_hdr));
	ndp16->dpe16[0].wDatagramLength = cpu_to_le16(dgram_size);

	/* null termination */
	ndp16->dpe16[1].wDatagramIndex = 0;
	ndp16->dpe16[1].wDatagramLength = 0;

	return skb;
}

static netdev_tx_t mhi_mbim_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
	struct mhi_mbim_context *mbim = link->mbim;
	unsigned long flags;
	int err = -ENOMEM;

	/* Serialize MHI channel queuing and MBIM seq */
	spin_lock_irqsave(&mbim->tx_lock, flags);

	skb = mbim_tx_fixup(skb, link->session, mbim->tx_seq);
	if (unlikely(!skb))
		goto exit_unlock;

	err = mhi_queue_skb(mbim->mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);

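	/* Throttle the netdev queue while the MHI TX ring is full; it is
	 * woken again from the UL completion callback.
	 */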
	if (mhi_queue_is_full(mbim->mdev, DMA_TO_DEVICE))
		netif_stop_queue(ndev);

	if (!err)
		mbim->tx_seq++;

exit_unlock:
	spin_unlock_irqrestore(&mbim->tx_lock, flags);

	if (unlikely(err)) {
		net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
				    ndev->name, err);
		dev_kfree_skb_any(skb);
		goto exit_drop;
	}

	return NETDEV_TX_OK;

exit_drop:
	u64_stats_update_begin(&link->tx_syncp);
	u64_stats_inc(&link->tx_dropped);
	u64_stats_update_end(&link->tx_syncp);

	return NETDEV_TX_OK;
}

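/* Validate the NTH16 header, return the first NDP offset on success or a
 * negative errno.
 */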
static int mbim_rx_verify_nth16(struct mhi_mbim_context *mbim, struct sk_buff *skb)
{
	struct usb_cdc_ncm_nth16 *nth16;
	int len;

	if (skb->len < sizeof(struct usb_cdc_ncm_nth16) +
			sizeof(struct usb_cdc_ncm_ndp16)) {
		net_err_ratelimited("frame too short\n");
		return -EINVAL;
	}

	nth16 = (struct usb_cdc_ncm_nth16 *)skb->data;

	if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) {
		net_err_ratelimited("invalid NTH16 signature <%#010x>\n",
				    le32_to_cpu(nth16->dwSignature));
		return -EINVAL;
	}

	/* No limit on the block length, except the size of the data pkt */
	len = le16_to_cpu(nth16->wBlockLength);
	if (len > skb->len) {
		net_err_ratelimited("NTB does not fit into the skb %u/%u\n",
				    len, skb->len);
		return -EINVAL;
	}

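	/* Tolerate a reset counter (both sequence numbers zero) and the
	 * 16-bit wrap from 0xffff to 0; any other jump is only logged.
	 */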
	if (mbim->rx_seq + 1 != le16_to_cpu(nth16->wSequence) &&
	    (mbim->rx_seq || le16_to_cpu(nth16->wSequence)) &&
	    !(mbim->rx_seq == 0xffff && !le16_to_cpu(nth16->wSequence))) {
		net_err_ratelimited("sequence number glitch prev=%d curr=%d\n",
				    mbim->rx_seq, le16_to_cpu(nth16->wSequence));
	}
	mbim->rx_seq = le16_to_cpu(nth16->wSequence);

	return le16_to_cpu(nth16->wNdpIndex);
}

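/* Validate an NDP16 header, return the number of datagram entries (excluding
 * the NULL terminator) on success or a negative errno.
 */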
static int mbim_rx_verify_ndp16(struct sk_buff *skb, struct usb_cdc_ncm_ndp16 *ndp16)
{
	int ret;

	if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
		net_err_ratelimited("invalid DPT16 length <%u>\n",
				    le16_to_cpu(ndp16->wLength));
		return -EINVAL;
	}

	ret = ((le16_to_cpu(ndp16->wLength) - sizeof(struct usb_cdc_ncm_ndp16))
			/ sizeof(struct usb_cdc_ncm_dpe16));
	ret--; /* Last entry is always a NULL terminator */

	if (sizeof(struct usb_cdc_ncm_ndp16) +
	     ret * sizeof(struct usb_cdc_ncm_dpe16) > skb->len) {
		net_err_ratelimited("Invalid nframes = %d\n", ret);
		return -EINVAL;
	}

	return ret;
}

static void mhi_mbim_rx(struct mhi_mbim_context *mbim, struct sk_buff *skb)
{
	int ndpoffset;

	/* Check NTB header and retrieve first NDP offset */
	ndpoffset = mbim_rx_verify_nth16(mbim, skb);
	if (ndpoffset < 0) {
		net_err_ratelimited("mbim: Incorrect NTB header\n");
		goto error;
	}

	/* Process each NDP */
	while (1) {
		struct usb_cdc_ncm_ndp16 ndp16;
		struct usb_cdc_ncm_dpe16 dpe16;
		struct mhi_mbim_link *link;
		int nframes, n, dpeoffset;
		unsigned int session;

		if (skb_copy_bits(skb, ndpoffset, &ndp16, sizeof(ndp16))) {
			net_err_ratelimited("mbim: Incorrect NDP offset (%u)\n",
					    ndpoffset);
			goto error;
		}

		/* Check NDP header and retrieve number of datagrams */
		nframes = mbim_rx_verify_ndp16(skb, &ndp16);
		if (nframes < 0) {
			net_err_ratelimited("mbim: Incorrect NDP16\n");
			goto error;
		}

		/* Only IP data type supported, no DSS in MHI context */
		if ((ndp16.dwSignature & cpu_to_le32(MBIM_NDP16_SIGN_MASK))
				!= cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN)) {
			net_err_ratelimited("mbim: Unsupported NDP type\n");
			goto next_ndp;
		}

		session = (le32_to_cpu(ndp16.dwSignature) & ~MBIM_NDP16_SIGN_MASK) >> 24;

		rcu_read_lock();

		link = mhi_mbim_get_link_rcu(mbim, session);
		if (!link) {
			net_err_ratelimited("mbim: bad packet session (%u)\n", session);
			goto unlock;
		}

		/* de-aggregate and deliver IP packets */
		dpeoffset = ndpoffset + sizeof(struct usb_cdc_ncm_ndp16);
		for (n = 0; n < nframes; n++, dpeoffset += sizeof(dpe16)) {
			u16 dgram_offset, dgram_len;
			struct sk_buff *skbn;

			if (skb_copy_bits(skb, dpeoffset, &dpe16, sizeof(dpe16)))
				break;

			dgram_offset = le16_to_cpu(dpe16.wDatagramIndex);
			dgram_len = le16_to_cpu(dpe16.wDatagramLength);

			if (!dgram_offset || !dgram_len)
				break; /* null terminator */

			skbn = netdev_alloc_skb(link->ndev, dgram_len);
			if (!skbn)
				continue;

			skb_put(skbn, dgram_len);
			skb_copy_bits(skb, dgram_offset, skbn->data, dgram_len);

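			/* Infer the network protocol from the IP version nibble */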
			switch (skbn->data[0] & 0xf0) {
			case 0x40:
				skbn->protocol = htons(ETH_P_IP);
				break;
			case 0x60:
				skbn->protocol = htons(ETH_P_IPV6);
				break;
			default:
				net_err_ratelimited("%s: unknown protocol\n",
						    link->ndev->name);
				dev_kfree_skb_any(skbn);
				u64_stats_update_begin(&link->rx_syncp);
				u64_stats_inc(&link->rx_errors);
				u64_stats_update_end(&link->rx_syncp);
				continue;
			}

			u64_stats_update_begin(&link->rx_syncp);
			u64_stats_inc(&link->rx_packets);
			u64_stats_add(&link->rx_bytes, skbn->len);
			u64_stats_update_end(&link->rx_syncp);

			netif_rx(skbn);
		}
unlock:
		rcu_read_unlock();
next_ndp:
		/* Other NDP to process? */
		ndpoffset = (int)le16_to_cpu(ndp16.wNextNdpIndex);
		if (!ndpoffset)
			break;
	}

	/* free skb */
	dev_consume_skb_any(skb);
	return;
error:
	dev_kfree_skb_any(skb);
}

static struct sk_buff *mhi_net_skb_agg(struct mhi_mbim_context *mbim,
				       struct sk_buff *skb)
{
	struct sk_buff *head = mbim->skbagg_head;
	struct sk_buff *tail = mbim->skbagg_tail;

	/* This is non-paged skb chaining using frag_list */
	if (!head) {
		mbim->skbagg_head = skb;
		return skb;
	}

	if (!skb_shinfo(head)->frag_list)
		skb_shinfo(head)->frag_list = skb;
	else
		tail->next = skb;

	head->len += skb->len;
	head->data_len += skb->len;
	head->truesize += skb->truesize;

	mbim->skbagg_tail = skb;

	return mbim->skbagg_head;
}

static void mhi_net_rx_refill_work(struct work_struct *work)
{
	struct mhi_mbim_context *mbim = container_of(work, struct mhi_mbim_context,
						     rx_refill.work);
	struct mhi_device *mdev = mbim->mdev;
	int err;

	while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
		struct sk_buff *skb = alloc_skb(mbim->mru, GFP_KERNEL);

		if (unlikely(!skb))
			break;

		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb,
				    mbim->mru, MHI_EOT);
		if (unlikely(err)) {
			kfree_skb(skb);
			break;
		}

		/* Do not hog the CPU if rx buffers are consumed faster than
		 * queued (unlikely).
		 */
		cond_resched();
	}

	/* If we're still starved of rx buffers, reschedule later */
	if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mbim->rx_queue_sz)
		schedule_delayed_work(&mbim->rx_refill, HZ / 2);
}

static void mhi_mbim_dl_callback(struct mhi_device *mhi_dev,
				 struct mhi_result *mhi_res)
{
	struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev);
	struct sk_buff *skb = mhi_res->buf_addr;
	int free_desc_count;

	free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	if (unlikely(mhi_res->transaction_status)) {
		switch (mhi_res->transaction_status) {
		case -EOVERFLOW:
			/* Packet has been split over multiple transfers */
			skb_put(skb, mhi_res->bytes_xferd);
			mhi_net_skb_agg(mbim, skb);
			break;
		case -ENOTCONN:
			/* MHI layer stopping/resetting the DL channel */
			dev_kfree_skb_any(skb);
			return;
		default:
			/* Unknown error, simply drop */
			dev_kfree_skb_any(skb);
		}
	} else {
		skb_put(skb, mhi_res->bytes_xferd);

		if (mbim->skbagg_head) {
			/* Aggregate the final fragment */
			skb = mhi_net_skb_agg(mbim, skb);
			mbim->skbagg_head = NULL;
		}

		mhi_mbim_rx(mbim, skb);
	}

	/* Refill if the RX buffer queue becomes low */
	if (free_desc_count >= mbim->rx_queue_sz / 2)
		schedule_delayed_work(&mbim->rx_refill, 0);
}

static void mhi_mbim_ndo_get_stats64(struct net_device *ndev,
				     struct rtnl_link_stats64 *stats)
{
	struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&link->rx_syncp);
		stats->rx_packets = u64_stats_read(&link->rx_packets);
		stats->rx_bytes = u64_stats_read(&link->rx_bytes);
		stats->rx_errors = u64_stats_read(&link->rx_errors);
	} while (u64_stats_fetch_retry(&link->rx_syncp, start));

	do {
		start = u64_stats_fetch_begin(&link->tx_syncp);
		stats->tx_packets = u64_stats_read(&link->tx_packets);
		stats->tx_bytes = u64_stats_read(&link->tx_bytes);
		stats->tx_errors = u64_stats_read(&link->tx_errors);
		stats->tx_dropped = u64_stats_read(&link->tx_dropped);
	} while (u64_stats_fetch_retry(&link->tx_syncp, start));
}

static void mhi_mbim_ul_callback(struct mhi_device *mhi_dev,
				 struct mhi_result *mhi_res)
{
	struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev);
	struct sk_buff *skb = mhi_res->buf_addr;
	struct net_device *ndev = skb->dev;
	struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);

	/* Hardware has consumed the buffer, so free the skb (which is not
	 * freed by the MHI stack) and perform accounting.
	 */
	dev_consume_skb_any(skb);

	u64_stats_update_begin(&link->tx_syncp);
	if (unlikely(mhi_res->transaction_status)) {
		/* MHI layer stopping/resetting the UL channel */
		if (mhi_res->transaction_status == -ENOTCONN) {
			u64_stats_update_end(&link->tx_syncp);
			return;
		}

		u64_stats_inc(&link->tx_errors);
	} else {
		u64_stats_inc(&link->tx_packets);
		u64_stats_add(&link->tx_bytes, mhi_res->bytes_xferd);
	}
	u64_stats_update_end(&link->tx_syncp);

	if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mbim->mdev, DMA_TO_DEVICE))
		netif_wake_queue(ndev);
}

static int mhi_mbim_ndo_open(struct net_device *ndev)
{
	struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);

	/* Feed the MHI rx buffer pool */
	schedule_delayed_work(&link->mbim->rx_refill, 0);

	/* Carrier is established via out-of-band channel (e.g. qmi) */
	netif_carrier_on(ndev);

	netif_start_queue(ndev);

	return 0;
}

static int mhi_mbim_ndo_stop(struct net_device *ndev)
{
	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	return 0;
}

static const struct net_device_ops mhi_mbim_ndo = {
	.ndo_open = mhi_mbim_ndo_open,
	.ndo_stop = mhi_mbim_ndo_stop,
	.ndo_start_xmit = mhi_mbim_ndo_xmit,
	.ndo_get_stats64 = mhi_mbim_ndo_get_stats64,
};

static int mhi_mbim_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
			    struct netlink_ext_ack *extack)
{
	struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
	struct mhi_mbim_context *mbim = ctxt;

	link->session = if_id;
	link->mbim = mbim;
	link->ndev = ndev;
	u64_stats_init(&link->rx_syncp);
	u64_stats_init(&link->tx_syncp);

	rcu_read_lock();
	if (mhi_mbim_get_link_rcu(mbim, if_id)) {
		rcu_read_unlock();
		return -EEXIST;
	}
	rcu_read_unlock();

	/* Already protected by RTNL lock */
	hlist_add_head_rcu(&link->hlnode, &mbim->link_list[LINK_HASH(if_id)]);

	return register_netdevice(ndev);
}

static void mhi_mbim_dellink(void *ctxt, struct net_device *ndev,
			     struct list_head *head)
{
	struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);

	hlist_del_init_rcu(&link->hlnode);
	synchronize_rcu();

	unregister_netdevice_queue(ndev, head);
}

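/* WWAN links are raw-IP, point-to-point netdevs: no L2 header, no ARP */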
static void mhi_mbim_setup(struct net_device *ndev)
{
	ndev->header_ops = NULL;  /* No header */
	ndev->type = ARPHRD_RAWIP;
	ndev->needed_headroom = sizeof(struct mbim_tx_hdr);
	ndev->hard_header_len = 0;
	ndev->addr_len = 0;
	ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
	ndev->netdev_ops = &mhi_mbim_ndo;
	ndev->mtu = MHI_MBIM_DEFAULT_MTU;
	ndev->min_mtu = ETH_MIN_MTU;
	ndev->max_mtu = MHI_MAX_BUF_SZ - ndev->needed_headroom;
	ndev->tx_queue_len = 1000;
	ndev->needs_free_netdev = true;
}

static const struct wwan_ops mhi_mbim_wwan_ops = {
	.priv_size = sizeof(struct mhi_mbim_link),
	.setup = mhi_mbim_setup,
	.newlink = mhi_mbim_newlink,
	.dellink = mhi_mbim_dellink,
};

static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
	struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
	struct mhi_mbim_context *mbim;
	int err;

	mbim = devm_kzalloc(&mhi_dev->dev, sizeof(*mbim), GFP_KERNEL);
	if (!mbim)
		return -ENOMEM;

	spin_lock_init(&mbim->tx_lock);
	dev_set_drvdata(&mhi_dev->dev, mbim);
	mbim->mdev = mhi_dev;
	mbim->mru = mhi_dev->mhi_cntrl->mru ? mhi_dev->mhi_cntrl->mru : MHI_DEFAULT_MRU;

	INIT_DELAYED_WORK(&mbim->rx_refill, mhi_net_rx_refill_work);

	/* Start MHI channels */
	err = mhi_prepare_for_transfer(mhi_dev);
	if (err)
		return err;

	/* Number of transfer descriptors determines size of the queue */
	mbim->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	/* Register wwan link ops with MHI controller representing WWAN instance */
	return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, 0);
}

static void mhi_mbim_remove(struct mhi_device *mhi_dev)
{
	struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev);
	struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;

	mhi_unprepare_from_transfer(mhi_dev);
	cancel_delayed_work_sync(&mbim->rx_refill);
	wwan_unregister_ops(&cntrl->mhi_dev->dev);
	kfree_skb(mbim->skbagg_head);
	dev_set_drvdata(&mhi_dev->dev, NULL);
}

static const struct mhi_device_id mhi_mbim_id_table[] = {
	/* Hardware-accelerated data path (to modem IPA), MBIM protocol */
	{ .chan = "IP_HW0_MBIM", .driver_data = 0 },
	{}
};
MODULE_DEVICE_TABLE(mhi, mhi_mbim_id_table);

static struct mhi_driver mhi_mbim_driver = {
	.probe = mhi_mbim_probe,
	.remove = mhi_mbim_remove,
	.dl_xfer_cb = mhi_mbim_dl_callback,
	.ul_xfer_cb = mhi_mbim_ul_callback,
	.id_table = mhi_mbim_id_table,
	.driver = {
		.name = "mhi_wwan_mbim",
		.owner = THIS_MODULE,
	},
};

module_mhi_driver(mhi_mbim_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Network/MBIM over MHI");
MODULE_LICENSE("GPL v2");