// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wwan.h>
#include <net/ipv6.h>
#include <net/pkt_sched.h>

#include "t7xx_hif_dpmaif_rx.h"
#include "t7xx_hif_dpmaif_tx.h"
#include "t7xx_netdev.h"
#include "t7xx_pci.h"
#include "t7xx_port_proxy.h"
#include "t7xx_state_monitor.h"

#define IP_MUX_SESSION_DEFAULT	0
#define SBD_PACKET_TYPE_MASK	GENMASK(7, 4)

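/* Enable and kick NAPI polling on all RX queues. A runtime PM reference is
 * taken per queue before scheduling NAPI and released by the poll routine.
 */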
static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	struct dpmaif_ctrl *ctrl;
	int i, ret;

	ctrl = ctlb->hif_ctrl;

	if (ctlb->is_napi_en)
		return;

	for (i = 0; i < RXQ_NUM; i++) {
		/* The usage count has to be bumped every time before calling
		 * napi_schedule(). It will be decreased in the poll routine,
		 * right after napi_complete_done() is called.
		 */
		ret = pm_runtime_resume_and_get(ctrl->dev);
		if (ret < 0) {
			dev_err(ctrl->dev, "Failed to resume device: %d\n",
				ret);
			return;
		}
		napi_enable(ctlb->napi[i]);
		napi_schedule(ctlb->napi[i]);
	}
	ctlb->is_napi_en = true;
}

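/* Wait for in-flight polls to finish, then disable NAPI on all RX queues. */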
static void t7xx_ccmni_disable_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	int i;

	if (!ctlb->is_napi_en)
		return;

	for (i = 0; i < RXQ_NUM; i++) {
		napi_synchronize(ctlb->napi[i]);
		napi_disable(ctlb->napi[i]);
	}

	ctlb->is_napi_en = false;
}

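/* Network device open: bring up carrier and TX queues. NAPI is shared by
 * all CCMNI devices, so enable it only for the first opener.
 */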
static int t7xx_ccmni_open(struct net_device *dev)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;

	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
	if (!atomic_fetch_inc(&ccmni_ctl->napi_usr_refcnt))
		t7xx_ccmni_enable_napi(ccmni_ctl);

	atomic_inc(&ccmni->usage);
	return 0;
}

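/* Network device stop: disable NAPI when the last user closes, then take
 * the carrier and TX queues down.
 */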
static int t7xx_ccmni_close(struct net_device *dev)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;

	atomic_dec(&ccmni->usage);
	if (atomic_dec_and_test(&ccmni_ctl->napi_usr_refcnt))
		t7xx_ccmni_disable_napi(ccmni_ctl);

	netif_carrier_off(dev);
	netif_tx_disable(dev);
	return 0;
}

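/* Tag the skb with the originating interface index and queue it on the
 * DPMAIF TX HIF. Returns NETDEV_TX_BUSY if the HW queue cannot accept it.
 */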
static int t7xx_ccmni_send_packet(struct t7xx_ccmni *ccmni, struct sk_buff *skb,
				  unsigned int txq_number)
{
	struct t7xx_ccmni_ctrl *ctlb = ccmni->ctlb;
	struct t7xx_skb_cb *skb_cb = T7XX_SKB_CB(skb);

	skb_cb->netif_idx = ccmni->index;

	if (t7xx_dpmaif_tx_send_skb(ctlb->hif_ctrl, txq_number, skb))
		return NETDEV_TX_BUSY;

	return 0;
}

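/* Transmit handler: oversized or headroom-less packets are dropped, all
 * others go out through the default DPMAIF TX queue.
 */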
static netdev_tx_t t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	int skb_len = skb->len;

	/* Drop the packet if it exceeds the MTU or if there is not enough
	 * headroom for the CCCI header.
	 */
	if (skb->len > dev->mtu || skb_headroom(skb) < sizeof(struct ccci_header)) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	if (t7xx_ccmni_send_packet(ccmni, skb, DPMAIF_TX_DEFAULT_QUEUE))
		return NETDEV_TX_BUSY;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb_len;

	return NETDEV_TX_OK;
}

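/* TX watchdog timeout: account the error and restart the TX queues if the
 * interface is still in use.
 */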
static void t7xx_ccmni_tx_timeout(struct net_device *dev, unsigned int __always_unused txqueue)
{
	struct t7xx_ccmni *ccmni = netdev_priv(dev);

	dev->stats.tx_errors++;

	if (atomic_read(&ccmni->usage) > 0)
		netif_tx_wake_all_queues(dev);
}

static const struct net_device_ops ccmni_netdev_ops = {
	.ndo_open	  = t7xx_ccmni_open,
	.ndo_stop	  = t7xx_ccmni_close,
	.ndo_start_xmit   = t7xx_ccmni_start_xmit,
	.ndo_tx_timeout   = t7xx_ccmni_tx_timeout,
};

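/* Modem is ready: restore TX queues and carrier on all in-use interfaces
 * and re-enable NAPI if any user still holds a reference.
 */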
static void t7xx_ccmni_start(struct t7xx_ccmni_ctrl *ctlb)
{
	struct t7xx_ccmni *ccmni;
	int i;

	for (i = 0; i < ctlb->nic_dev_num; i++) {
		ccmni = ctlb->ccmni_inst[i];
		if (!ccmni)
			continue;

		if (atomic_read(&ccmni->usage) > 0) {
			netif_tx_start_all_queues(ccmni->dev);
			netif_carrier_on(ccmni->dev);
		}
	}

	if (atomic_read(&ctlb->napi_usr_refcnt))
		t7xx_ccmni_enable_napi(ctlb);
}

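/* Stop TX on all in-use interfaces before the DPMAIF HIF is stopped. */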
static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb)
{
	struct t7xx_ccmni *ccmni;
	int i;

	for (i = 0; i < ctlb->nic_dev_num; i++) {
		ccmni = ctlb->ccmni_inst[i];
		if (!ccmni)
			continue;

		if (atomic_read(&ccmni->usage) > 0)
			netif_tx_disable(ccmni->dev);
	}
}

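/* Disable NAPI and drop carrier on all in-use interfaces once the DPMAIF
 * HIF has been stopped.
 */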
static void t7xx_ccmni_post_stop(struct t7xx_ccmni_ctrl *ctlb)
{
	struct t7xx_ccmni *ccmni;
	int i;

	if (atomic_read(&ctlb->napi_usr_refcnt))
		t7xx_ccmni_disable_napi(ctlb);

	for (i = 0; i < ctlb->nic_dev_num; i++) {
		ccmni = ctlb->ccmni_inst[i];
		if (!ccmni)
			continue;

		if (atomic_read(&ccmni->usage) > 0)
			netif_carrier_off(ccmni->dev);
	}
}

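/* WWAN netdev setup: a pure-IP (ARPHRD_NONE), point-to-point device with
 * headroom for the CCCI header and checksum/SG/GRO offloads enabled.
 */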
static void t7xx_ccmni_wwan_setup(struct net_device *dev)
{
	dev->needed_headroom += sizeof(struct ccci_header);

	dev->mtu = ETH_DATA_LEN;
	dev->max_mtu = CCMNI_MTU_MAX;
	BUILD_BUG_ON(CCMNI_MTU_MAX > DPMAIF_HW_MTU_SIZE);

	dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
	dev->watchdog_timeo = CCMNI_NETDEV_WDT_TO;

	dev->flags = IFF_POINTOPOINT | IFF_NOARP;

	dev->features = NETIF_F_VLAN_CHALLENGED;

	dev->features |= NETIF_F_SG;
	dev->hw_features |= NETIF_F_SG;

	dev->features |= NETIF_F_HW_CSUM;
	dev->hw_features |= NETIF_F_HW_CSUM;

	dev->features |= NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_RXCSUM;

	dev->features |= NETIF_F_GRO;
	dev->hw_features |= NETIF_F_GRO;

	dev->needs_free_netdev = true;

	dev->type = ARPHRD_NONE;

	dev->netdev_ops = &ccmni_netdev_ops;
}

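/* Attach each DPMAIF RX queue's NAPI context to the shared dummy device. */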
static void t7xx_init_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	int i;

	/* There is a single HW shared by multiple net devices,
	 * so use a dummy device for NAPI.
	 */
	init_dummy_netdev(&ctlb->dummy_dev);
	atomic_set(&ctlb->napi_usr_refcnt, 0);
	ctlb->is_napi_en = false;

	for (i = 0; i < RXQ_NUM; i++) {
		ctlb->napi[i] = &ctlb->hif_ctrl->rxq[i].napi;
		netif_napi_add_weight(&ctlb->dummy_dev, ctlb->napi[i], t7xx_dpmaif_napi_rx_poll,
				      NIC_NAPI_POLL_BUDGET);
	}
}

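/* Tear down the NAPI contexts created by t7xx_init_netdev_napi(). */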
static void t7xx_uninit_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	int i;

	for (i = 0; i < RXQ_NUM; i++) {
		netif_napi_del(ctlb->napi[i]);
		ctlb->napi[i] = NULL;
	}
}

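/* WWAN link creation: bind the new netdev to the CCMNI slot selected by
 * if_id and register it with the network stack.
 */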
static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id,
				   struct netlink_ext_ack *extack)
{
	struct t7xx_ccmni_ctrl *ctlb = ctxt;
	struct t7xx_ccmni *ccmni;
	int ret;

	if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
		return -EINVAL;

	ccmni = wwan_netdev_drvpriv(dev);
	ccmni->index = if_id;
	ccmni->ctlb = ctlb;
	ccmni->dev = dev;
	atomic_set(&ccmni->usage, 0);
	WRITE_ONCE(ctlb->ccmni_inst[if_id], ccmni);

	ret = register_netdevice(dev);
	if (ret)
		return ret;

	netif_device_attach(dev);
	return 0;
}

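/* WWAN link removal: clear the CCMNI slot and unregister the netdev. */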
static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct list_head *head)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	struct t7xx_ccmni_ctrl *ctlb = ctxt;
	u8 if_id = ccmni->index;

	if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
		return;

	if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni))
		return;

	WRITE_ONCE(ctlb->ccmni_inst[if_id], NULL);
	unregister_netdevice(dev);
}

static const struct wwan_ops ccmni_wwan_ops = {
	.priv_size = sizeof(struct t7xx_ccmni),
	.setup     = t7xx_ccmni_wwan_setup,
	.newlink   = t7xx_ccmni_wwan_newlink,
	.dellink   = t7xx_ccmni_wwan_dellink,
};

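/* Register the WWAN ops once per device; the WWAN core then creates the
 * netdev for the default IP MUX session.
 */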
static int t7xx_ccmni_register_wwan(struct t7xx_ccmni_ctrl *ctlb)
{
	struct device *dev = ctlb->hif_ctrl->dev;
	int ret;

	if (ctlb->wwan_is_registered)
		return 0;

	/* WWAN core will create a netdev for the default IP MUX channel */
	ret = wwan_register_ops(dev, &ccmni_wwan_ops, ctlb, IP_MUX_SESSION_DEFAULT);
	if (ret < 0) {
		dev_err(dev, "Unable to register WWAN ops, %d\n", ret);
		return ret;
	}

	ctlb->wwan_is_registered = true;
	return 0;
}

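/* Modem state machine notifier: follow the modem state by starting or
 * stopping the network interfaces and forward the event to DPMAIF.
 */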
static int t7xx_ccmni_md_state_callback(enum md_state state, void *para)
{
	struct t7xx_ccmni_ctrl *ctlb = para;
	struct device *dev;
	int ret = 0;

	dev = ctlb->hif_ctrl->dev;
	ctlb->md_sta = state;

	switch (state) {
	case MD_STATE_READY:
		ret = t7xx_ccmni_register_wwan(ctlb);
		if (!ret)
			t7xx_ccmni_start(ctlb);
		break;

	case MD_STATE_EXCEPTION:
	case MD_STATE_STOPPED:
		t7xx_ccmni_pre_stop(ctlb);

		ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
		if (ret < 0)
			dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);

		t7xx_ccmni_post_stop(ctlb);
		break;

	case MD_STATE_WAITING_FOR_HS1:
	case MD_STATE_WAITING_TO_STOP:
		ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
		if (ret < 0)
			dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);

		break;

	default:
		break;
	}

	return ret;
}

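/* Register with the modem state machine to receive state transitions. */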
static void init_md_status_notifier(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
	struct t7xx_fsm_notifier *md_status_notifier;

	md_status_notifier = &ctlb->md_status_notify;
	INIT_LIST_HEAD(&md_status_notifier->entry);
	md_status_notifier->notifier_fn = t7xx_ccmni_md_state_callback;
	md_status_notifier->data = ctlb;

	t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier);
}

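/* RX callback from DPMAIF: route the skb to the netdev recorded in its
 * control block, set the L3 protocol and pass it to GRO.
 */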
static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_buff *skb,
				struct napi_struct *napi)
{
	struct t7xx_skb_cb *skb_cb;
	struct net_device *net_dev;
	struct t7xx_ccmni *ccmni;
	int pkt_type, skb_len;
	u8 netif_id;

	skb_cb = T7XX_SKB_CB(skb);
	netif_id = skb_cb->netif_idx;
	ccmni = READ_ONCE(ccmni_ctlb->ccmni_inst[netif_id]);
	if (!ccmni) {
		dev_kfree_skb(skb);
		return;
	}

	net_dev = ccmni->dev;
	pkt_type = skb_cb->rx_pkt_type;
	skb->dev = net_dev;
	if (pkt_type == PKT_TYPE_IP6)
		skb->protocol = htons(ETH_P_IPV6);
	else
		skb->protocol = htons(ETH_P_IP);

	skb_len = skb->len;
	napi_gro_receive(napi, skb);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += skb_len;
}

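/* TX done interrupt: wake the corresponding TX queue of the default netdev
 * if it was stopped.
 */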
static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
{
	struct t7xx_ccmni *ccmni = READ_ONCE(ctlb->ccmni_inst[0]);
	struct netdev_queue *net_queue;

	if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) {
		net_queue = netdev_get_tx_queue(ccmni->dev, qno);
		if (netif_tx_queue_stopped(net_queue))
			netif_tx_wake_queue(net_queue);
	}
}

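/* HW TX queue full: stop the corresponding TX queue of the default netdev. */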
static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
{
	struct t7xx_ccmni *ccmni = READ_ONCE(ctlb->ccmni_inst[0]);
	struct netdev_queue *net_queue;

	if (atomic_read(&ccmni->usage) > 0) {
		netdev_err(ccmni->dev, "TX queue %d is full\n", qno);
		net_queue = netdev_get_tx_queue(ccmni->dev, qno);
		netif_tx_stop_queue(net_queue);
	}
}

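/* DPMAIF TX queue state callback: dispatch IRQ and FULL events while the
 * modem is in the READY state.
 */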
static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev,
					  enum dpmaif_txq_state state, int qno)
{
	struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;

	if (ctlb->md_sta != MD_STATE_READY)
		return;

	if (!READ_ONCE(ctlb->ccmni_inst[0])) {
		dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n");
		return;
	}

	if (state == DMPAIF_TXQ_STATE_IRQ)
		t7xx_ccmni_queue_tx_irq_notify(ctlb, qno);
	else if (state == DMPAIF_TXQ_STATE_FULL)
		t7xx_ccmni_queue_tx_full_notify(ctlb, qno);
}

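/**
 * t7xx_ccmni_init() - Initialize the CCMNI network interface layer.
 * @t7xx_dev: MTK PCIe device context.
 *
 * Allocate the control block, initialize the DPMAIF HIF and NAPI, and
 * register for modem state notifications.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */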
int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	struct t7xx_ccmni_ctrl *ctlb;

	ctlb = devm_kzalloc(dev, sizeof(*ctlb), GFP_KERNEL);
	if (!ctlb)
		return -ENOMEM;

	t7xx_dev->ccmni_ctlb = ctlb;
	ctlb->t7xx_dev = t7xx_dev;
	ctlb->callbacks.state_notify = t7xx_ccmni_queue_state_notify;
	ctlb->callbacks.recv_skb = t7xx_ccmni_recv_skb;
	ctlb->nic_dev_num = NIC_DEV_DEFAULT;

	ctlb->hif_ctrl = t7xx_dpmaif_hif_init(t7xx_dev, &ctlb->callbacks);
	if (!ctlb->hif_ctrl)
		return -ENOMEM;

	t7xx_init_netdev_napi(ctlb);
	init_md_status_notifier(t7xx_dev);
	return 0;
}

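/**
 * t7xx_ccmni_exit() - Tear down the CCMNI network interface layer.
 * @t7xx_dev: MTK PCIe device context.
 *
 * Unregister the modem state notifier and the WWAN ops, then release the
 * NAPI contexts and the DPMAIF HIF.
 */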
void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;

	t7xx_fsm_notifier_unregister(t7xx_dev->md, &ctlb->md_status_notify);

	if (ctlb->wwan_is_registered) {
		wwan_unregister_ops(&t7xx_dev->pdev->dev);
		ctlb->wwan_is_registered = false;
	}

	t7xx_uninit_netdev_napi(ctlb);
	t7xx_dpmaif_hif_exit(ctlb->hif_ctrl);
}