// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wwan.h>
#include <net/ipv6.h>
#include <net/pkt_sched.h>

#include "t7xx_hif_dpmaif_rx.h"
#include "t7xx_hif_dpmaif_tx.h"
#include "t7xx_netdev.h"
#include "t7xx_pci.h"
#include "t7xx_port_proxy.h"
#include "t7xx_state_monitor.h"

#define IP_MUX_SESSION_DEFAULT	0
#define SBD_PACKET_TYPE_MASK	GENMASK(7, 4)

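/* The RX queues' NAPI contexts are shared by all CCMNI netdevs. They are
 * enabled and disabled together, with napi_usr_refcnt counting the users.
 */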
static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	int i;

	if (ctlb->is_napi_en)
		return;

	for (i = 0; i < RXQ_NUM; i++) {
		napi_enable(ctlb->napi[i]);
		napi_schedule(ctlb->napi[i]);
	}
	ctlb->is_napi_en = true;
}

static void t7xx_ccmni_disable_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	int i;

	if (!ctlb->is_napi_en)
		return;

	for (i = 0; i < RXQ_NUM; i++) {
		napi_synchronize(ctlb->napi[i]);
		napi_disable(ctlb->napi[i]);
	}

	ctlb->is_napi_en = false;
}

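/* Bring the link up; NAPI is enabled when the first CCMNI netdev is opened,
 * as tracked by napi_usr_refcnt.
 */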
static int t7xx_ccmni_open(struct net_device *dev)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;

	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
	if (!atomic_fetch_inc(&ccmni_ctl->napi_usr_refcnt))
		t7xx_ccmni_enable_napi(ccmni_ctl);

	atomic_inc(&ccmni->usage);
	return 0;
}

static int t7xx_ccmni_close(struct net_device *dev)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;

	atomic_dec(&ccmni->usage);
	if (atomic_dec_and_test(&ccmni_ctl->napi_usr_refcnt))
		t7xx_ccmni_disable_napi(ccmni_ctl);

	netif_carrier_off(dev);
	netif_tx_disable(dev);
	return 0;
}

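/* Tag the skb with the originating netdev index and hand it to the DPMAIF
 * HIF layer. A nonzero return from t7xx_dpmaif_tx_send_skb (e.g. when the
 * HW queue is full) is reported to the caller as NETDEV_TX_BUSY.
 */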
static int t7xx_ccmni_send_packet(struct t7xx_ccmni *ccmni, struct sk_buff *skb,
				  unsigned int txq_number)
{
	struct t7xx_ccmni_ctrl *ctlb = ccmni->ctlb;
	struct t7xx_skb_cb *skb_cb = T7XX_SKB_CB(skb);

	skb_cb->netif_idx = ccmni->index;

	if (t7xx_dpmaif_tx_send_skb(ctlb->hif_ctrl, txq_number, skb))
		return NETDEV_TX_BUSY;

	return 0;
}

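/* Packets that exceed the MTU or lack headroom for the CCCI header cannot
 * be sent, so they are dropped and counted instead of being requeued.
 */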
static netdev_tx_t t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	int skb_len = skb->len;

	/* If the MTU has changed or there is no headroom, drop the packet */
	if (skb->len > dev->mtu || skb_headroom(skb) < sizeof(struct ccci_header)) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	if (t7xx_ccmni_send_packet(ccmni, skb, DPMAIF_TX_DEFAULT_QUEUE))
		return NETDEV_TX_BUSY;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb_len;

	return NETDEV_TX_OK;
}

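/* Watchdog callback: count the error and, if the interface is still in use,
 * wake all TX queues so transmission can resume.
 */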
static void t7xx_ccmni_tx_timeout(struct net_device *dev, unsigned int __always_unused txqueue)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);

	dev->stats.tx_errors++;

	if (atomic_read(&ccmni->usage) > 0)
		netif_tx_wake_all_queues(dev);
}

static const struct net_device_ops ccmni_netdev_ops = {
	.ndo_open	  = t7xx_ccmni_open,
	.ndo_stop	  = t7xx_ccmni_close,
	.ndo_start_xmit   = t7xx_ccmni_start_xmit,
	.ndo_tx_timeout   = t7xx_ccmni_tx_timeout,
};

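/* Restore carrier and TX queues on every in-use CCMNI netdev once the modem
 * is ready, and re-enable NAPI if any opened netdev still holds a reference.
 */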
static void t7xx_ccmni_start(struct t7xx_ccmni_ctrl *ctlb)
{
	struct t7xx_ccmni *ccmni;
	int i;

	for (i = 0; i < ctlb->nic_dev_num; i++) {
		ccmni = ctlb->ccmni_inst[i];
		if (!ccmni)
			continue;

		if (atomic_read(&ccmni->usage) > 0) {
			netif_tx_start_all_queues(ccmni->dev);
			netif_carrier_on(ccmni->dev);
		}
	}

	if (atomic_read(&ctlb->napi_usr_refcnt))
		t7xx_ccmni_enable_napi(ctlb);
}

static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb)
{
	struct t7xx_ccmni *ccmni;
	int i;

	for (i = 0; i < ctlb->nic_dev_num; i++) {
		ccmni = ctlb->ccmni_inst[i];
		if (!ccmni)
			continue;

		if (atomic_read(&ccmni->usage) > 0)
			netif_tx_disable(ccmni->dev);
	}
}

static void t7xx_ccmni_post_stop(struct t7xx_ccmni_ctrl *ctlb)
{
	struct t7xx_ccmni *ccmni;
	int i;

	if (atomic_read(&ctlb->napi_usr_refcnt))
		t7xx_ccmni_disable_napi(ctlb);

	for (i = 0; i < ctlb->nic_dev_num; i++) {
		ccmni = ctlb->ccmni_inst[i];
		if (!ccmni)
			continue;

		if (atomic_read(&ccmni->usage) > 0)
			netif_carrier_off(ccmni->dev);
	}
}

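/* Initialize a CCMNI netdev created by the WWAN core: reserve headroom for
 * the CCCI header, advertise checksum offload, scatter-gather and GRO, and
 * mark the device as a point-to-point, ARP-less interface.
 */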
static void t7xx_ccmni_wwan_setup(struct net_device *dev)
{
	dev->needed_headroom += sizeof(struct ccci_header);

	dev->mtu = ETH_DATA_LEN;
	dev->max_mtu = CCMNI_MTU_MAX;
	BUILD_BUG_ON(CCMNI_MTU_MAX > DPMAIF_HW_MTU_SIZE);

	dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
	dev->watchdog_timeo = CCMNI_NETDEV_WDT_TO;

	dev->flags = IFF_POINTOPOINT | IFF_NOARP;

	dev->features = NETIF_F_VLAN_CHALLENGED;

	dev->features |= NETIF_F_SG;
	dev->hw_features |= NETIF_F_SG;

	dev->features |= NETIF_F_HW_CSUM;
	dev->hw_features |= NETIF_F_HW_CSUM;

	dev->features |= NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_RXCSUM;

	dev->features |= NETIF_F_GRO;
	dev->hw_features |= NETIF_F_GRO;

	dev->needs_free_netdev = true;

	dev->type = ARPHRD_NONE;

	dev->netdev_ops = &ccmni_netdev_ops;
}

static void t7xx_init_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	int i;

	/* One HW block is shared by multiple net devices,
	 * so add a dummy device for NAPI.
	 */
	init_dummy_netdev(&ctlb->dummy_dev);
	atomic_set(&ctlb->napi_usr_refcnt, 0);
	ctlb->is_napi_en = false;

	for (i = 0; i < RXQ_NUM; i++) {
		ctlb->napi[i] = &ctlb->hif_ctrl->rxq[i].napi;
		netif_napi_add_weight(&ctlb->dummy_dev, ctlb->napi[i], t7xx_dpmaif_napi_rx_poll,
				      NIC_NAPI_POLL_BUDGET);
	}
}

static void t7xx_uninit_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	int i;

	for (i = 0; i < RXQ_NUM; i++) {
		netif_napi_del(ctlb->napi[i]);
		ctlb->napi[i] = NULL;
	}
}

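/* WWAN link ops: bind a CCMNI instance to the requested IP MUX session ID
 * and register it as a network device.
 */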
static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id,
				   struct netlink_ext_ack *extack)
{
	struct t7xx_ccmni_ctrl *ctlb = ctxt;
	struct t7xx_ccmni *ccmni;
	int ret;

	if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
		return -EINVAL;

	ccmni = wwan_netdev_drvpriv(dev);
	ccmni->index = if_id;
	ccmni->ctlb = ctlb;
	ccmni->dev = dev;
	atomic_set(&ccmni->usage, 0);
	ctlb->ccmni_inst[if_id] = ccmni;

	ret = register_netdevice(dev);
	if (ret)
		return ret;

	netif_device_attach(dev);
	return 0;
}

static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct list_head *head)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	struct t7xx_ccmni_ctrl *ctlb = ctxt;
	u8 if_id = ccmni->index;

	if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
		return;

	if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni))
		return;

	unregister_netdevice(dev);
}

static const struct wwan_ops ccmni_wwan_ops = {
	.priv_size = sizeof(struct t7xx_ccmni),
	.setup     = t7xx_ccmni_wwan_setup,
	.newlink   = t7xx_ccmni_wwan_newlink,
	.dellink   = t7xx_ccmni_wwan_dellink,
};

static int t7xx_ccmni_register_wwan(struct t7xx_ccmni_ctrl *ctlb)
{
	struct device *dev = ctlb->hif_ctrl->dev;
	int ret;

	if (ctlb->wwan_is_registered)
		return 0;

	/* WWAN core will create a netdev for the default IP MUX channel */
	ret = wwan_register_ops(dev, &ccmni_wwan_ops, ctlb, IP_MUX_SESSION_DEFAULT);
	if (ret < 0) {
		dev_err(dev, "Unable to register WWAN ops, %d\n", ret);
		return ret;
	}

	ctlb->wwan_is_registered = true;
	return 0;
}

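/* FSM notifier: on READY, register with the WWAN core and start the
 * netdevs; on exception or stop, quiesce TX before notifying DPMAIF and
 * drop carrier afterwards.
 */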
static int t7xx_ccmni_md_state_callback(enum md_state state, void *para)
{
	struct t7xx_ccmni_ctrl *ctlb = para;
	struct device *dev;
	int ret = 0;

	dev = ctlb->hif_ctrl->dev;
	ctlb->md_sta = state;

	switch (state) {
	case MD_STATE_READY:
		ret = t7xx_ccmni_register_wwan(ctlb);
		if (!ret)
			t7xx_ccmni_start(ctlb);
		break;

	case MD_STATE_EXCEPTION:
	case MD_STATE_STOPPED:
		t7xx_ccmni_pre_stop(ctlb);

		ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
		if (ret < 0)
			dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);

		t7xx_ccmni_post_stop(ctlb);
		break;

	case MD_STATE_WAITING_FOR_HS1:
	case MD_STATE_WAITING_TO_STOP:
		ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
		if (ret < 0)
			dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);

		break;

	default:
		break;
	}

	return ret;
}

static void init_md_status_notifier(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
	struct t7xx_fsm_notifier *md_status_notifier;

	md_status_notifier = &ctlb->md_status_notify;
	INIT_LIST_HEAD(&md_status_notifier->entry);
	md_status_notifier->notifier_fn = t7xx_ccmni_md_state_callback;
	md_status_notifier->data = ctlb;

	t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier);
}

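/* RX path: route the skb to the CCMNI netdev recorded in its control block,
 * set the L3 protocol from the DPMAIF packet type, and pass it to GRO.
 */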
static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_buff *skb,
				struct napi_struct *napi)
{
	struct t7xx_skb_cb *skb_cb;
	struct net_device *net_dev;
	struct t7xx_ccmni *ccmni;
	int pkt_type, skb_len;
	u8 netif_id;

	skb_cb = T7XX_SKB_CB(skb);
	netif_id = skb_cb->netif_idx;
	ccmni = ccmni_ctlb->ccmni_inst[netif_id];
	if (!ccmni) {
		dev_kfree_skb(skb);
		return;
	}

	net_dev = ccmni->dev;
	pkt_type = skb_cb->rx_pkt_type;
	skb->dev = net_dev;
	if (pkt_type == PKT_TYPE_IP6)
		skb->protocol = htons(ETH_P_IPV6);
	else
		skb->protocol = htons(ETH_P_IP);

	skb_len = skb->len;
	napi_gro_receive(napi, skb);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += skb_len;
}

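/* TX flow control: a queue-full event from DPMAIF stops the matching netdev
 * TX queue, and the TX done interrupt wakes it again.
 */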
static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
{
	struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
	struct netdev_queue *net_queue;

	if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) {
		net_queue = netdev_get_tx_queue(ccmni->dev, qno);
		if (netif_tx_queue_stopped(net_queue))
			netif_tx_wake_queue(net_queue);
	}
}

static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
{
	struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
	struct netdev_queue *net_queue;

	if (atomic_read(&ccmni->usage) > 0) {
		netdev_err(ccmni->dev, "TX queue %d is full\n", qno);
		net_queue = netdev_get_tx_queue(ccmni->dev, qno);
		netif_tx_stop_queue(net_queue);
	}
}

static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev,
					  enum dpmaif_txq_state state, int qno)
{
	struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;

	if (ctlb->md_sta != MD_STATE_READY)
		return;

	if (!ctlb->ccmni_inst[0]) {
		dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n");
		return;
	}

	if (state == DMPAIF_TXQ_STATE_IRQ)
		t7xx_ccmni_queue_tx_irq_notify(ctlb, qno);
	else if (state == DMPAIF_TXQ_STATE_FULL)
		t7xx_ccmni_queue_tx_full_notify(ctlb, qno);
}

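/* Allocate the CCMNI control block, register the DPMAIF callbacks, set up
 * NAPI on a dummy netdev, and hook into modem state notifications.
 */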
int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	struct t7xx_ccmni_ctrl *ctlb;

	ctlb = devm_kzalloc(dev, sizeof(*ctlb), GFP_KERNEL);
	if (!ctlb)
		return -ENOMEM;

	t7xx_dev->ccmni_ctlb = ctlb;
	ctlb->t7xx_dev = t7xx_dev;
	ctlb->callbacks.state_notify = t7xx_ccmni_queue_state_notify;
	ctlb->callbacks.recv_skb = t7xx_ccmni_recv_skb;
	ctlb->nic_dev_num = NIC_DEV_DEFAULT;

	ctlb->hif_ctrl = t7xx_dpmaif_hif_init(t7xx_dev, &ctlb->callbacks);
	if (!ctlb->hif_ctrl)
		return -ENOMEM;

	t7xx_init_netdev_napi(ctlb);
	init_md_status_notifier(t7xx_dev);
	return 0;
}

void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;

	t7xx_fsm_notifier_unregister(t7xx_dev->md, &ctlb->md_status_notify);

	if (ctlb->wwan_is_registered) {
		wwan_unregister_ops(&t7xx_dev->pdev->dev);
		ctlb->wwan_is_registered = false;
	}

	t7xx_uninit_netdev_napi(ctlb);
	t7xx_dpmaif_hif_exit(ctlb->hif_ctrl);
}