xref: /openbmc/linux/net/core/flow_dissector.c (revision 39b6f3aa)
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/flow_keys.h>

/* copy saddr & daddr, possibly using 64bit load/store
 * Equivalent to:	flow->src = iph->saddr;
 *			flow->dst = iph->daddr;
 */
static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
		     offsetof(typeof(*flow), src) + sizeof(flow->src));
	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}

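/* skb_flow_dissect - extract the minimal flow keys from a packet
 *
 * Walks the headers starting at the skb's network offset and fills @flow
 * with source/destination addresses, the transport protocol and, when
 * present, the transport ports. VLAN, PPPoE, GRE and IPIP encapsulations
 * are peeled off so the keys describe the innermost flow. Returns false
 * if the headers cannot be parsed.
 */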
bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
{
	int poff, nhoff = skb_network_offset(skb);
	u8 ip_proto;
	__be16 proto = skb->protocol;

	memset(flow, 0, sizeof(*flow));

again:
	switch (proto) {
	case __constant_htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;

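		/* Fragments other than the first carry no transport header,
		 * so treat every fragment alike and leave the port keys at
		 * zero; this keeps all fragments of one packet hashing to
		 * the same value.
		 */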
		if (ip_is_fragment(iph))
			ip_proto = 0;
		else
			ip_proto = iph->protocol;
		iph_to_flow_copy_addrs(flow, iph);
		nhoff += iph->ihl * 4;
		break;
	}
	case __constant_htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;
ipv6:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;

		ip_proto = iph->nexthdr;
		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
		nhoff += sizeof(struct ipv6hdr);
		break;
	}
	case __constant_htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
		if (!vlan)
			return false;

		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
	case __constant_htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case __constant_htons(PPP_IP):
			goto ip;
		case __constant_htons(PPP_IPV6):
			goto ipv6;
		default:
			return false;
		}
	}
	default:
		return false;
	}

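	/* Look through GRE and IPIP tunnels so the flow keys describe the
	 * encapsulated traffic rather than the tunnel endpoints.
	 */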
	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
		if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
			proto = hdr->proto;
			nhoff += 4;
			if (hdr->flags & GRE_CSUM)
				nhoff += 4;
			if (hdr->flags & GRE_KEY)
				nhoff += 4;
			if (hdr->flags & GRE_SEQ)
				nhoff += 4;
			if (proto == htons(ETH_P_TEB)) {
				const struct ethhdr *eth;
				struct ethhdr _eth;

				eth = skb_header_pointer(skb, nhoff,
							 sizeof(_eth), &_eth);
				if (!eth)
					return false;
				proto = eth->h_proto;
				nhoff += sizeof(*eth);
			}
			goto again;
		}
		break;
	}
	case IPPROTO_IPIP:
		goto again;
	default:
		break;
	}

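	/* Record the transport protocol and, when proto_ports_offset() knows
	 * where to find them, the 32-bit word holding the source and
	 * destination ports.
	 */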
	flow->ip_proto = ip_proto;
	poff = proto_ports_offset(ip_proto);
	if (poff >= 0) {
		__be32 *ports, _ports;

		nhoff += poff;
		ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports);
		if (ports)
			flow->ports = *ports;
	}

	flow->thoff = (u16) nhoff;

	return true;
}
EXPORT_SYMBOL(skb_flow_dissect);

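/* Random seed for the packet hashes below, set once at late boot. */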
static u32 hashrnd __read_mostly;

/*
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets rxhash in skb to non-zero hash value
 * on success, zero indicates no valid hash.  Also, sets l4_rxhash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_rxhash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	if (!skb_flow_dissect(skb, &keys))
		return;

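	/* Non-zero ports mean the dissector found a transport header, so
	 * the result qualifies as an L4 (4-tuple) hash.
	 */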
	if (keys.ports)
		skb->l4_rxhash = 1;

	/* get a consistent hash (same value on both flow directions) */
	if (((__force u32)keys.dst < (__force u32)keys.src) ||
	    (((__force u32)keys.dst == (__force u32)keys.src) &&
	     ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
		swap(keys.dst, keys.src);
		swap(keys.port16[0], keys.port16[1]);
	}

	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src,
			    (__force u32)keys.ports, hashrnd);
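	/* Zero is reserved to mean "no valid hash", so never return it. */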
	if (!hash)
		hash = 1;

	skb->rxhash = hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);

/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

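	/* With traffic classes configured, restrict the range to the Tx
	 * queues assigned to this skb's priority class.
	 */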
	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

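	/* Prefer the socket's flow hash; fall back to the protocol number
	 * for skbs without one.
	 */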
	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol;
	hash = jhash_1word(hash, hashrnd);

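	/* Map the 32-bit hash onto [qoffset, qoffset + qcount) with a
	 * multiply-and-shift instead of a modulo.
	 */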
	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);

/* __skb_get_poff() returns the offset to the payload as far as it could
 * be dissected. The main user is currently BPF, so that we can dynamically
 * truncate packets without needing to push actual payload to user space
 * and can analyze headers only, instead.
 */
u32 __skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 poff = 0;

	if (!skb_flow_dissect(skb, &keys))
		return 0;

	poff += keys.thoff;
	switch (keys.ip_proto) {
	case IPPROTO_TCP: {
		const struct tcphdr *tcph;
		struct tcphdr _tcph;

		tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph);
		if (!tcph)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not care about header
	 * extensions for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}

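/* Clamp a queue index to the device's real number of Tx queues, warning
 * (ratelimited) and falling back to queue 0 when the selected index is
 * out of range.
 */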
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}
	return queue_index;
}

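/* Pick a Tx queue from the XPS (Transmit Packet Steering) map of the CPU
 * doing the transmit: a single mapped queue is used directly, otherwise
 * one of the mapped queues is chosen by flow hash. Returns -1 when XPS is
 * not compiled in or not configured for this CPU.
 */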
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else {
				u32 hash;
				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->rxhash;
				hash = jhash_1word(hash, hashrnd);
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}

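/* Default Tx queue selection used when a driver does not implement
 * ndo_select_queue: reuse the queue cached on the socket while it is
 * still valid, otherwise consult XPS and finally fall back to
 * skb_tx_hash().
 */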
u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);
		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk) {
			struct dst_entry *dst =
				    rcu_dereference_check(sk->sk_dst_cache, 1);

			if (dst && skb_dst(skb) == dst)
				sk_tx_queue_set(sk, queue_index);
		}

		queue_index = new_index;
	}

	return queue_index;
}
EXPORT_SYMBOL(__netdev_pick_tx);

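/* netdev_pick_tx - select the Tx queue for an outgoing skb
 *
 * Multi-queue devices are asked via ndo_select_queue when they provide it,
 * otherwise __netdev_pick_tx() decides; the result is capped to the real
 * queue count, recorded in the skb's queue mapping and returned as the
 * corresponding netdev_queue.
 */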
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb)
{
	int queue_index = 0;

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;
		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb);
		else
			queue_index = __netdev_pick_tx(dev, skb);
		queue_index = dev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}

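/* Seed hashrnd at the very end of boot (late_initcall_sync is the last
 * initcall level), by which point the random pool has had time to gather
 * entropy.
 */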
static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);
389