/* drivers/net/ifb.c:

	The purpose of this driver is to provide a device that allows
	for sharing of resources:

	1) qdiscs/policies that are per device as opposed to system wide.
	ifb provides a device that traffic can be redirected to, which
	gives the impression that those qdiscs/policies are shared.

	2) Allows for queueing incoming traffic for shaping instead of
	dropping.

	The original concept is based on what is known as the IMQ
	driver, initially written by Martin Devera, later rewritten
	by Patrick McHardy and then maintained by Andre Correa.

	You need the tc action mirror or redirect to feed this device
	packets.

	This program is free software; you can redistribute it and/or
	modify it under the terms of the GNU General Public License
	as published by the Free Software Foundation; either version
	2 of the License, or (at your option) any later version.

	Authors:	Jamal Hadi Salim (2005)

*/
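
/*
 * Example usage (illustrative only; exact syntax depends on the
 * iproute2 version and the qdiscs configured on the system):
 *
 *	modprobe ifb
 *	ip link set dev ifb0 up
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 parent ffff: protocol ip u32 match u32 0 0 \
 *		action mirred egress redirect dev ifb0
 *	tc qdisc add dev ifb0 root sfq
 *
 * This redirects all ingress traffic on eth0 to ifb0, where the sfq
 * qdisc attached to ifb0 queues and schedules it before it re-enters
 * the stack via netif_rx().
 */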

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <net/pkt_sched.h>

#define TX_TIMEOUT  (2*HZ)

#define TX_Q_LIMIT    32
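/*
 * Per-device state. Packets handed to ifb_xmit() are queued on rq
 * (the receive queue) and later moved to tq (the transmit queue) by
 * the tasklet, which dequeues them and re-injects them into the stack.
 */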
struct ifb_private {
	struct net_device_stats stats;
	struct tasklet_struct   ifb_tasklet;
	int     tasklet_pending;
	/* mostly debug stats, leave in for now */
	unsigned long   st_task_enter; /* tasklet entered */
	unsigned long   st_txq_refl_try; /* transmit queue refill attempt */
	unsigned long   st_rxq_enter; /* receive queue entered */
	unsigned long   st_rx2tx_tran; /* receive to transmit transfers */
	unsigned long   st_rxq_notenter; /* receive queue not entered, rescheduled */
	unsigned long   st_rx_frm_egr; /* received from egress path */
	unsigned long   st_rx_frm_ing; /* received from ingress path */
	unsigned long   st_rxq_check; /* receive queue rechecked for leftovers */
	unsigned long   st_rxq_rsch; /* receive queue not empty, rescheduled */
	struct sk_buff_head     rq;
	struct sk_buff_head     tq;
};

static int numifbs = 2;

static void ri_tasklet(unsigned long dev);
static int ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *ifb_get_stats(struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);

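/*
 * The tasklet does the actual work: it refills the transmit queue from
 * the receive queue under the tx lock, then hands each packet back to
 * the stack, via dev_queue_xmit() for packets that were redirected on
 * egress and netif_rx() for packets that were redirected on ingress.
 */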
static void ri_tasklet(unsigned long dev)
{
	struct net_device *_dev = (struct net_device *)dev;
	struct ifb_private *dp = netdev_priv(_dev);
	struct net_device_stats *stats = &dp->stats;
	struct sk_buff *skb;

	dp->st_task_enter++;
	if ((skb = skb_peek(&dp->tq)) == NULL) {
		dp->st_txq_refl_try++;
		if (netif_tx_trylock(_dev)) {
			dp->st_rxq_enter++;
			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
				skb_queue_tail(&dp->tq, skb);
				dp->st_rx2tx_tran++;
			}
			netif_tx_unlock(_dev);
		} else {
			/* can't get the tx lock, reschedule */
			dp->st_rxq_notenter++;
			goto resched;
		}
	}

	while ((skb = skb_dequeue(&dp->tq)) != NULL) {
		u32 from = G_TC_FROM(skb->tc_verd);

		skb->tc_verd = 0;
		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;

		/* hand the packet back to the device it originally came from */
		skb->dev = __dev_get_by_index(skb->iif);
		if (!skb->dev) {
			dev_kfree_skb(skb);
			stats->tx_dropped++;
			break;
		}
		skb->iif = _dev->ifindex;

		if (from & AT_EGRESS) {
			dp->st_rx_frm_egr++;
			dev_queue_xmit(skb);
		} else if (from & AT_INGRESS) {
			dp->st_rx_frm_ing++;
			skb_pull(skb, skb->dev->hard_header_len);
			netif_rx(skb);
		} else
			BUG();
	}

	if (netif_tx_trylock(_dev)) {
		dp->st_rxq_check++;
		if ((skb = skb_peek(&dp->rq)) == NULL) {
			dp->tasklet_pending = 0;
			if (netif_queue_stopped(_dev))
				netif_wake_queue(_dev);
		} else {
			dp->st_rxq_rsch++;
			netif_tx_unlock(_dev);
			goto resched;
		}
		netif_tx_unlock(_dev);
	} else {
resched:
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}
}

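/*
 * Device setup: ifb looks like an ethernet device but never ARPs and
 * carries no multicast; it only handles traffic redirected to it by tc.
 */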
static void __init ifb_setup(struct net_device *dev)
{
	/* Initialize the device structure. */
	dev->get_stats = ifb_get_stats;
	dev->hard_start_xmit = ifb_xmit;
	dev->open = ifb_open;
	dev->stop = ifb_close;

	/* Fill in device structure with ethernet-generic values. */
	ether_setup(dev);
	dev->tx_queue_len = TX_Q_LIMIT;
	dev->change_mtu = NULL;
	dev->flags |= IFF_NOARP;
	dev->flags &= ~IFF_MULTICAST;
	SET_MODULE_OWNER(dev);
	random_ether_addr(dev->dev_addr);
}

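/*
 * Entry point for redirected packets. Anything that did not arrive via
 * a tc mirror/redirect action (no tc verdict, no incoming ifindex) is
 * dropped; everything else is queued for the tasklet to process.
 */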
static int ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);
	struct net_device_stats *stats = &dp->stats;
	int ret = 0;
	u32 from = G_TC_FROM(skb->tc_verd);

	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	/* drop anything that was not redirected here by a tc action */
	if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->iif) {
		dev_kfree_skb(skb);
		stats->rx_dropped++;
		return ret;
	}

	if (skb_queue_len(&dp->rq) >= dev->tx_queue_len)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;
	skb_queue_tail(&dp->rq, skb);
	if (!dp->tasklet_pending) {
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}

	return ret;
}

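/*
 * Return the accumulated stats. The pr_debug() dump of the tasklet
 * counters is only compiled in when DEBUG is defined for this file.
 */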
static struct net_device_stats *ifb_get_stats(struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);
	struct net_device_stats *stats = &dp->stats;

	pr_debug("tasklets stats %ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld\n",
		dp->st_task_enter, dp->st_txq_refl_try, dp->st_rxq_enter,
		dp->st_rx2tx_tran, dp->st_rxq_notenter, dp->st_rx_frm_egr,
		dp->st_rx_frm_ing, dp->st_rxq_check, dp->st_rxq_rsch);

	return stats;
}

static struct net_device **ifbs;

/* Number of ifb devices to be set up by this module. */
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");

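/* Stop the device: kill the tasklet and drop anything still queued. */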
static int ifb_close(struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);

	tasklet_kill(&dp->ifb_tasklet);
	netif_stop_queue(dev);
	skb_queue_purge(&dp->rq);
	skb_queue_purge(&dp->tq);
	return 0;
}

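/* Bring the device up: initialize the tasklet and both queues. */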
static int ifb_open(struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);

	tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
	skb_queue_head_init(&dp->rq);
	skb_queue_head_init(&dp->tq);
	netif_start_queue(dev);

	return 0;
}

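/*
 * Allocate and register a single ifb%d device. On registration failure
 * the half-constructed device is freed and the error is returned.
 */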
static int __init ifb_init_one(int index)
{
	struct net_device *dev_ifb;
	int err;

	dev_ifb = alloc_netdev(sizeof(struct ifb_private),
				 "ifb%d", ifb_setup);

	if (!dev_ifb)
		return -ENOMEM;

	if ((err = register_netdev(dev_ifb))) {
		free_netdev(dev_ifb);
		dev_ifb = NULL;
	} else {
		ifbs[index] = dev_ifb;
	}

	return err;
}

static void ifb_free_one(int index)
{
	unregister_netdev(ifbs[index]);
	free_netdev(ifbs[index]);
}

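/*
 * Module init: create numifbs devices. On a partial failure, only the
 * devices that were successfully registered are unwound.
 */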
static int __init ifb_init_module(void)
{
	int i, err = 0;

	ifbs = kmalloc(numifbs * sizeof(void *), GFP_KERNEL);
	if (!ifbs)
		return -ENOMEM;
	for (i = 0; i < numifbs && !err; i++)
		err = ifb_init_one(i);
	if (err) {
		/*
		 * The loop exits with i one past the device that failed;
		 * back up so the unwind frees only the devices that
		 * registered successfully.
		 */
		i--;
		while (--i >= 0)
			ifb_free_one(i);
	}

	return err;
}

static void __exit ifb_cleanup_module(void)
{
	int i;

	for (i = 0; i < numifbs; i++)
		ifb_free_one(i);
	kfree(ifbs);
}

module_init(ifb_init_module);
module_exit(ifb_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamal Hadi Salim");