1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2020-21 Intel Corporation.
4  */
5 
6 #include <linux/etherdevice.h>
7 #include <linux/if_arp.h>
8 #include <linux/if_link.h>
9 #include <linux/pm_runtime.h>
10 #include <linux/rtnetlink.h>
11 #include <linux/wwan.h>
12 #include <net/pkt_sched.h>
13 
14 #include "iosm_ipc_chnl_cfg.h"
15 #include "iosm_ipc_imem_ops.h"
16 #include "iosm_ipc_wwan.h"
17 
18 #define IOSM_IP_TYPE_MASK 0xF0
19 #define IOSM_IP_TYPE_IPV4 0x40
20 #define IOSM_IP_TYPE_IPV6 0x60
21 
22 /**
23  * struct iosm_netdev_priv - netdev WWAN driver specific private data
24  * @ipc_wwan:	Pointer to iosm_wwan struct
25  * @netdev:	Pointer to network interface device structure
26  * @if_id:	Interface id for device.
27  * @ch_id:	IPC channel number for which interface device is created.
28  */
29 struct iosm_netdev_priv {
30 	struct iosm_wwan *ipc_wwan;
31 	struct net_device *netdev;
32 	int if_id;
33 	int ch_id;
34 };
35 
36 /**
37  * struct iosm_wwan - This structure contains information about WWAN root device
38  *		      and interface to the IPC layer.
39  * @ipc_imem:		Pointer to imem data-struct
40  * @sub_netlist:	List of active netdevs
41  * @dev:		Pointer device structure
42  */
43 struct iosm_wwan {
44 	struct iosm_imem *ipc_imem;
45 	struct iosm_netdev_priv __rcu *sub_netlist[IP_MUX_SESSION_END + 1];
46 	struct device *dev;
47 };
48 
49 /* Bring-up the wwan net link */
50 static int ipc_wwan_link_open(struct net_device *netdev)
51 {
52 	struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
53 	struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
54 	int if_id = priv->if_id;
55 	int ret = 0;
56 
57 	if (if_id < IP_MUX_SESSION_START ||
58 	    if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
59 		return -EINVAL;
60 
61 	pm_runtime_get_sync(ipc_wwan->ipc_imem->dev);
62 	/* get channel id */
63 	priv->ch_id = ipc_imem_sys_wwan_open(ipc_wwan->ipc_imem, if_id);
64 
65 	if (priv->ch_id < 0) {
66 		dev_err(ipc_wwan->dev,
67 			"cannot connect wwan0 & id %d to the IPC mem layer",
68 			if_id);
69 		ret = -ENODEV;
70 		goto err_out;
71 	}
72 
73 	/* enable tx path, DL data may follow */
74 	netif_start_queue(netdev);
75 
76 	dev_dbg(ipc_wwan->dev, "Channel id %d allocated to if_id %d",
77 		priv->ch_id, priv->if_id);
78 
79 err_out:
80 	pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
81 	pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
82 
83 	return ret;
84 }
85 
86 /* Bring-down the wwan net link */
87 static int ipc_wwan_link_stop(struct net_device *netdev)
88 {
89 	struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
90 
91 	netif_stop_queue(netdev);
92 
93 	pm_runtime_get_sync(priv->ipc_wwan->ipc_imem->dev);
94 	ipc_imem_sys_wwan_close(priv->ipc_wwan->ipc_imem, priv->if_id,
95 				priv->ch_id);
96 	priv->ch_id = -1;
97 	pm_runtime_mark_last_busy(priv->ipc_wwan->ipc_imem->dev);
98 	pm_runtime_put_autosuspend(priv->ipc_wwan->ipc_imem->dev);
99 
100 	return 0;
101 }
102 
103 /* Transmit a packet */
104 static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb,
105 					  struct net_device *netdev)
106 {
107 	struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
108 	struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
109 	unsigned int len = skb->len;
110 	int if_id = priv->if_id;
111 	int ret;
112 
113 	/* Interface IDs from 1 to 8 are for IP data
114 	 * & from 257 to 261 are for non-IP data
115 	 */
116 	if (if_id < IP_MUX_SESSION_START ||
117 	    if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
118 		return -EINVAL;
119 
120 	pm_runtime_get(ipc_wwan->ipc_imem->dev);
121 	/* Send the SKB to device for transmission */
122 	ret = ipc_imem_sys_wwan_transmit(ipc_wwan->ipc_imem,
123 					 if_id, priv->ch_id, skb);
124 
125 	/* Return code of zero is success */
126 	if (ret == 0) {
127 		netdev->stats.tx_packets++;
128 		netdev->stats.tx_bytes += len;
129 		ret = NETDEV_TX_OK;
130 	} else if (ret == -EBUSY) {
131 		ret = NETDEV_TX_BUSY;
132 		dev_err(ipc_wwan->dev, "unable to push packets");
133 	} else {
134 		pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
135 		pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
136 		goto exit;
137 	}
138 
139 	pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
140 	pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
141 
142 	return ret;
143 
144 exit:
145 	/* Log any skb drop */
146 	if (if_id)
147 		dev_dbg(ipc_wwan->dev, "skb dropped. IF_ID: %d, ret: %d", if_id,
148 			ret);
149 
150 	dev_kfree_skb_any(skb);
151 	netdev->stats.tx_dropped++;
152 	return NETDEV_TX_OK;
153 }
154 
/* Ops structure for wwan net link; no ndo_get_stats — the default
 * netdev->stats counters updated in the TX/RX paths are used.
 */
static const struct net_device_ops ipc_inm_ops = {
	.ndo_open = ipc_wwan_link_open,
	.ndo_stop = ipc_wwan_link_stop,
	.ndo_start_xmit = ipc_wwan_link_transmit,
};
161 
162 /* Setup function for creating new net link */
163 static void ipc_wwan_setup(struct net_device *iosm_dev)
164 {
165 	iosm_dev->header_ops = NULL;
166 	iosm_dev->hard_header_len = 0;
167 	iosm_dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
168 
169 	iosm_dev->type = ARPHRD_NONE;
170 	iosm_dev->mtu = ETH_DATA_LEN;
171 	iosm_dev->min_mtu = ETH_MIN_MTU;
172 	iosm_dev->max_mtu = ETH_MAX_MTU;
173 
174 	iosm_dev->flags = IFF_POINTOPOINT | IFF_NOARP;
175 	iosm_dev->needs_free_netdev = true;
176 
177 	iosm_dev->netdev_ops = &ipc_inm_ops;
178 }
179 
180 /* Create new wwan net link */
181 static int ipc_wwan_newlink(void *ctxt, struct net_device *dev,
182 			    u32 if_id, struct netlink_ext_ack *extack)
183 {
184 	struct iosm_wwan *ipc_wwan = ctxt;
185 	struct iosm_netdev_priv *priv;
186 	int err;
187 
188 	if (if_id < IP_MUX_SESSION_START ||
189 	    if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
190 		return -EINVAL;
191 
192 	priv = wwan_netdev_drvpriv(dev);
193 	priv->if_id = if_id;
194 	priv->netdev = dev;
195 	priv->ipc_wwan = ipc_wwan;
196 
197 	if (rcu_access_pointer(ipc_wwan->sub_netlist[if_id]))
198 		return -EBUSY;
199 
200 	err = register_netdevice(dev);
201 	if (err)
202 		return err;
203 
204 	rcu_assign_pointer(ipc_wwan->sub_netlist[if_id], priv);
205 	netif_device_attach(dev);
206 
207 	return 0;
208 }
209 
/* Remove a wwan net link (wwan_ops.dellink). Unpublishes the netdev from
 * sub_netlist before unregistering it so concurrent RCU readers (RX and
 * flow-control paths) cannot obtain a netdev that is going away.
 */
static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,
			     struct list_head *head)
{
	struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(dev);
	struct iosm_wwan *ipc_wwan = ctxt;
	int if_id = priv->if_id;

	/* if_id out of the session range would mean corrupted priv state */
	if (WARN_ON(if_id < IP_MUX_SESSION_START ||
		    if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist)))
		return;

	/* The published entry must be the one being deleted */
	if (WARN_ON(rcu_access_pointer(ipc_wwan->sub_netlist[if_id]) != priv))
		return;

	RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL);
	/* unregistering includes synchronize_net() */
	unregister_netdevice_queue(dev, head);
}
228 
/* Link management ops registered with the WWAN core; the core allocates
 * priv_size bytes of per-netdev private data (struct iosm_netdev_priv).
 */
static const struct wwan_ops iosm_wwan_ops = {
	.priv_size = sizeof(struct iosm_netdev_priv),
	.setup = ipc_wwan_setup,
	.newlink = ipc_wwan_newlink,
	.dellink = ipc_wwan_dellink,
};
235 
236 int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg,
237 		     bool dss, int if_id)
238 {
239 	struct sk_buff *skb = skb_arg;
240 	struct net_device_stats *stats;
241 	struct iosm_netdev_priv *priv;
242 	int ret;
243 
244 	if ((skb->data[0] & IOSM_IP_TYPE_MASK) == IOSM_IP_TYPE_IPV4)
245 		skb->protocol = htons(ETH_P_IP);
246 	else if ((skb->data[0] & IOSM_IP_TYPE_MASK) ==
247 		 IOSM_IP_TYPE_IPV6)
248 		skb->protocol = htons(ETH_P_IPV6);
249 
250 	skb->pkt_type = PACKET_HOST;
251 
252 	if (if_id < IP_MUX_SESSION_START ||
253 	    if_id > IP_MUX_SESSION_END) {
254 		ret = -EINVAL;
255 		goto free;
256 	}
257 
258 	rcu_read_lock();
259 	priv = rcu_dereference(ipc_wwan->sub_netlist[if_id]);
260 	if (!priv) {
261 		ret = -EINVAL;
262 		goto unlock;
263 	}
264 	skb->dev = priv->netdev;
265 	stats = &priv->netdev->stats;
266 	stats->rx_packets++;
267 	stats->rx_bytes += skb->len;
268 
269 	ret = netif_rx(skb);
270 	skb = NULL;
271 unlock:
272 	rcu_read_unlock();
273 free:
274 	dev_kfree_skb(skb);
275 	return ret;
276 }
277 
278 void ipc_wwan_tx_flowctrl(struct iosm_wwan *ipc_wwan, int if_id, bool on)
279 {
280 	struct net_device *netdev;
281 	struct iosm_netdev_priv *priv;
282 	bool is_tx_blk;
283 
284 	rcu_read_lock();
285 	priv = rcu_dereference(ipc_wwan->sub_netlist[if_id]);
286 	if (!priv) {
287 		rcu_read_unlock();
288 		return;
289 	}
290 
291 	netdev = priv->netdev;
292 
293 	is_tx_blk = netif_queue_stopped(netdev);
294 
295 	if (on)
296 		dev_dbg(ipc_wwan->dev, "session id[%d]: flowctrl enable",
297 			if_id);
298 
299 	if (on && !is_tx_blk)
300 		netif_stop_queue(netdev);
301 	else if (!on && is_tx_blk)
302 		netif_wake_queue(netdev);
303 	rcu_read_unlock();
304 }
305 
306 struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev)
307 {
308 	struct iosm_wwan *ipc_wwan;
309 
310 	ipc_wwan = kzalloc(sizeof(*ipc_wwan), GFP_KERNEL);
311 	if (!ipc_wwan)
312 		return NULL;
313 
314 	ipc_wwan->dev = dev;
315 	ipc_wwan->ipc_imem = ipc_imem;
316 
317 	/* WWAN core will create a netdev for the default IP MUX channel */
318 	if (wwan_register_ops(ipc_wwan->dev, &iosm_wwan_ops, ipc_wwan,
319 			      IP_MUX_SESSION_DEFAULT)) {
320 		kfree(ipc_wwan);
321 		return NULL;
322 	}
323 
324 	return ipc_wwan;
325 }
326 
/* Tear down the WWAN root context created by ipc_wwan_init() and free it.
 * Must not be called while packets are still being delivered via
 * ipc_wwan_receive()/ipc_wwan_tx_flowctrl().
 */
void ipc_wwan_deinit(struct iosm_wwan *ipc_wwan)
{
	/* This call will remove all child netdev(s) */
	wwan_unregister_ops(ipc_wwan->dev);

	kfree(ipc_wwan);
}
334