/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * RMNET Data virtual network driver
 */

#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <net/pkt_sched.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_vnd.h"

/* RX/TX Fixup */

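/* Update the per-CPU RX packet and byte counters for an skb delivered to
 * this virtual device. The writes are bracketed by u64_stats_update_begin/end
 * so that readers on 32-bit systems see consistent 64-bit values.
 */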
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.rx_pkts++;
	pcpu_ptr->stats.rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

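/* Update the per-CPU TX packet and byte counters for an skb sent through
 * this virtual device.
 */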
void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.tx_pkts++;
	pcpu_ptr->stats.tx_bytes += skb->len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

/* Network Device Operations */

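/* Transmit entry point. If this device is bound to a real device, hand the
 * skb to the rmnet egress handler; otherwise count a TX drop and free the
 * skb. NETDEV_TX_OK is returned in both cases since the skb is consumed.
 */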
static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct rmnet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->real_dev) {
		rmnet_egress_handler(skb);
	} else {
		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
		kfree_skb(skb);
	}
	return NETDEV_TX_OK;
}

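/* Reject MTUs outside the range [0, RMNET_MAX_PACKET_SIZE]; otherwise apply
 * the new value directly.
 */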
static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
		return -EINVAL;

	rmnet_dev->mtu = new_mtu;
	return 0;
}

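/* Report the ifindex of the underlying real device as this device's link. */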
static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	return priv->real_dev->ifindex;
}

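/* Called when the device is registered. Allocate the per-CPU stats block and
 * initialize GRO cells, unwinding the stats allocation if gro_cells_init()
 * fails.
 */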
static int rmnet_vnd_init(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	int err;

	priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
	if (!priv->pcpu_stats)
		return -ENOMEM;

	err = gro_cells_init(&priv->gro_cells, dev);
	if (err) {
		free_percpu(priv->pcpu_stats);
		return err;
	}

	return 0;
}

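/* Called when the device is unregistered. Release the GRO cells and per-CPU
 * stats allocated in rmnet_vnd_init().
 */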
static void rmnet_vnd_uninit(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	gro_cells_destroy(&priv->gro_cells);
	free_percpu(priv->pcpu_stats);
}

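/* Aggregate the per-CPU counters into the rtnl_link_stats64 structure.
 * Packet and byte counts are read under the u64_stats fetch/retry loop;
 * tx_drops is only ever incremented with this_cpu_inc(), so it is summed
 * without the seqcount.
 */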
static void rmnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *s)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_vnd_stats total_stats;
	struct rmnet_pcpu_stats *pcpu_ptr;
	unsigned int cpu, start;

	memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));

	for_each_possible_cpu(cpu) {
		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);

		do {
			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
			total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
			total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
			total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
			total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));

		total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
	}

	s->rx_packets = total_stats.rx_pkts;
	s->rx_bytes = total_stats.rx_bytes;
	s->tx_packets = total_stats.tx_pkts;
	s->tx_bytes = total_stats.tx_bytes;
	s->tx_dropped = total_stats.tx_drops;
}

static const struct net_device_ops rmnet_vnd_ops = {
	.ndo_start_xmit = rmnet_vnd_start_xmit,
	.ndo_change_mtu = rmnet_vnd_change_mtu,
	.ndo_get_iflink = rmnet_vnd_get_iflink,
	.ndo_add_slave  = rmnet_add_bridge,
	.ndo_del_slave  = rmnet_del_bridge,
	.ndo_init       = rmnet_vnd_init,
	.ndo_uninit     = rmnet_vnd_uninit,
	.ndo_get_stats64 = rmnet_get_stats64,
};

/* Called by the kernel whenever a new rmnet<n> device is created. Sets MTU,
 * flags, ARP type, needed headroom, and so on.
 */
void rmnet_vnd_setup(struct net_device *rmnet_dev)
{
	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
	rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
	rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
	random_ether_addr(rmnet_dev->dev_addr);
	rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;

	/* Raw IP mode */
	rmnet_dev->header_ops = NULL;  /* No header */
	rmnet_dev->type = ARPHRD_RAWIP;
	rmnet_dev->hard_header_len = 0;
	rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

	rmnet_dev->needs_free_netdev = true;
}

/* Exposed API */

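/* Create a new rmnet virtual device bound to MUX ID @id on top of @real_dev.
 * Fails if the endpoint already has an egress device or if the MUX ID is
 * already mapped on this port; on success the endpoint and private data are
 * filled in and the port's device count is incremented.
 */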
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
		      struct rmnet_port *port,
		      struct net_device *real_dev,
		      struct rmnet_endpoint *ep)
{
	struct rmnet_priv *priv;
	int rc;

	if (ep->egress_dev)
		return -EINVAL;

	if (rmnet_get_endpoint(port, id))
		return -EBUSY;

	rmnet_dev->hw_features = NETIF_F_RXCSUM;
	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	rmnet_dev->hw_features |= NETIF_F_SG;

	rc = register_netdevice(rmnet_dev);
	if (!rc) {
		ep->egress_dev = rmnet_dev;
		ep->mux_id = id;
		port->nr_rmnet_devs++;

		rmnet_dev->rtnl_link_ops = &rmnet_link_ops;

		priv = netdev_priv(rmnet_dev);
		priv->mux_id = id;
		priv->real_dev = real_dev;

		netdev_dbg(rmnet_dev, "rmnet dev created\n");
	}

	return rc;
}

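/* Detach the endpoint for MUX ID @id from its port and drop the port's
 * device count. Only the endpoint bookkeeping is cleared here; unregistering
 * the netdev itself is left to the caller.
 */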
int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
		      struct rmnet_endpoint *ep)
{
	if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
		return -EINVAL;

	ep->egress_dev = NULL;
	port->nr_rmnet_devs--;
	return 0;
}

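/* Return the MUX ID this virtual device is bound to. */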
u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev)
{
	struct rmnet_priv *priv;

	priv = netdev_priv(rmnet_dev);
	return priv->mux_id;
}

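/* Enable or disable transmission on the device by waking or stopping its
 * TX queue.
 */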
int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
	netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
	/* Although we expect a similar number of enable and disable
	 * commands, optimize for the disable path, which is more
	 * latency sensitive than enable.
	 */
	if (unlikely(enable))
		netif_wake_queue(rmnet_dev);
	else
		netif_stop_queue(rmnet_dev);

	return 0;
}