xref: /openbmc/linux/net/openvswitch/flow.c (revision b34e08d5)
1 /*
2  * Copyright (c) 2007-2013 Nicira, Inc.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16  * 02110-1301, USA
17  */
18 
19 #include "flow.h"
20 #include "datapath.h"
21 #include <linux/uaccess.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/if_ether.h>
25 #include <linux/if_vlan.h>
26 #include <net/llc_pdu.h>
27 #include <linux/kernel.h>
28 #include <linux/jhash.h>
29 #include <linux/jiffies.h>
30 #include <linux/llc.h>
31 #include <linux/module.h>
32 #include <linux/in.h>
33 #include <linux/rcupdate.h>
34 #include <linux/if_arp.h>
35 #include <linux/ip.h>
36 #include <linux/ipv6.h>
37 #include <linux/sctp.h>
38 #include <linux/smp.h>
39 #include <linux/tcp.h>
40 #include <linux/udp.h>
41 #include <linux/icmp.h>
42 #include <linux/icmpv6.h>
43 #include <linux/rculist.h>
44 #include <net/ip.h>
45 #include <net/ip_tunnels.h>
46 #include <net/ipv6.h>
47 #include <net/ndisc.h>
48 
49 u64 ovs_flow_used_time(unsigned long flow_jiffies)
50 {
51 	struct timespec cur_ts;
52 	u64 cur_ms, idle_ms;
53 
54 	ktime_get_ts(&cur_ts);
55 	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
56 	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
57 		 cur_ts.tv_nsec / NSEC_PER_MSEC;
58 
59 	return cur_ms - idle_ms;
60 }
61 
62 #define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
63 
64 void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
65 {
66 	struct flow_stats *stats;
67 	__be16 tcp_flags = 0;
68 
69 	if (!flow->stats.is_percpu)
70 		stats = flow->stats.stat;
71 	else
72 		stats = this_cpu_ptr(flow->stats.cpu_stats);
73 
74 	if ((flow->key.eth.type == htons(ETH_P_IP) ||
75 	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
76 	    flow->key.ip.frag != OVS_FRAG_TYPE_LATER &&
77 	    flow->key.ip.proto == IPPROTO_TCP &&
78 	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
79 		tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
80 	}
81 
82 	spin_lock(&stats->lock);
83 	stats->used = jiffies;
84 	stats->packet_count++;
85 	stats->byte_count += skb->len;
86 	stats->tcp_flags |= tcp_flags;
87 	spin_unlock(&stats->lock);
88 }
89 
90 static void stats_read(struct flow_stats *stats,
91 		       struct ovs_flow_stats *ovs_stats,
92 		       unsigned long *used, __be16 *tcp_flags)
93 {
94 	spin_lock(&stats->lock);
95 	if (!*used || time_after(stats->used, *used))
96 		*used = stats->used;
97 	*tcp_flags |= stats->tcp_flags;
98 	ovs_stats->n_packets += stats->packet_count;
99 	ovs_stats->n_bytes += stats->byte_count;
100 	spin_unlock(&stats->lock);
101 }
102 
103 void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
104 			unsigned long *used, __be16 *tcp_flags)
105 {
106 	int cpu;
107 
108 	*used = 0;
109 	*tcp_flags = 0;
110 	memset(ovs_stats, 0, sizeof(*ovs_stats));
111 
112 	local_bh_disable();
113 	if (!flow->stats.is_percpu) {
114 		stats_read(flow->stats.stat, ovs_stats, used, tcp_flags);
115 	} else {
116 		for_each_possible_cpu(cpu) {
117 			struct flow_stats *stats;
118 
119 			stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
120 			stats_read(stats, ovs_stats, used, tcp_flags);
121 		}
122 	}
123 	local_bh_enable();
124 }
125 
126 static void stats_reset(struct flow_stats *stats)
127 {
128 	spin_lock(&stats->lock);
129 	stats->used = 0;
130 	stats->packet_count = 0;
131 	stats->byte_count = 0;
132 	stats->tcp_flags = 0;
133 	spin_unlock(&stats->lock);
134 }
135 
136 void ovs_flow_stats_clear(struct sw_flow *flow)
137 {
138 	int cpu;
139 
140 	local_bh_disable();
141 	if (!flow->stats.is_percpu) {
142 		stats_reset(flow->stats.stat);
143 	} else {
144 		for_each_possible_cpu(cpu) {
145 			stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));
146 		}
147 	}
148 	local_bh_enable();
149 }
150 
151 static int check_header(struct sk_buff *skb, int len)
152 {
153 	if (unlikely(skb->len < len))
154 		return -EINVAL;
155 	if (unlikely(!pskb_may_pull(skb, len)))
156 		return -ENOMEM;
157 	return 0;
158 }
159 
160 static bool arphdr_ok(struct sk_buff *skb)
161 {
162 	return pskb_may_pull(skb, skb_network_offset(skb) +
163 				  sizeof(struct arp_eth_header));
164 }
165 
166 static int check_iphdr(struct sk_buff *skb)
167 {
168 	unsigned int nh_ofs = skb_network_offset(skb);
169 	unsigned int ip_len;
170 	int err;
171 
172 	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
173 	if (unlikely(err))
174 		return err;
175 
176 	ip_len = ip_hdrlen(skb);
177 	if (unlikely(ip_len < sizeof(struct iphdr) ||
178 		     skb->len < nh_ofs + ip_len))
179 		return -EINVAL;
180 
181 	skb_set_transport_header(skb, nh_ofs + ip_len);
182 	return 0;
183 }
184 
185 static bool tcphdr_ok(struct sk_buff *skb)
186 {
187 	int th_ofs = skb_transport_offset(skb);
188 	int tcp_len;
189 
190 	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
191 		return false;
192 
193 	tcp_len = tcp_hdrlen(skb);
194 	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
195 		     skb->len < th_ofs + tcp_len))
196 		return false;
197 
198 	return true;
199 }
200 
201 static bool udphdr_ok(struct sk_buff *skb)
202 {
203 	return pskb_may_pull(skb, skb_transport_offset(skb) +
204 				  sizeof(struct udphdr));
205 }
206 
207 static bool sctphdr_ok(struct sk_buff *skb)
208 {
209 	return pskb_may_pull(skb, skb_transport_offset(skb) +
210 				  sizeof(struct sctphdr));
211 }
212 
213 static bool icmphdr_ok(struct sk_buff *skb)
214 {
215 	return pskb_may_pull(skb, skb_transport_offset(skb) +
216 				  sizeof(struct icmphdr));
217 }
218 
/* Parse the IPv6 header (and extension headers) of @skb into @key.
 *
 * On success, sets the transport header to just past the last extension
 * header, fills key->ip.{proto,tos,ttl,frag} and key->ipv6.{label,addr},
 * and returns the total network-header length (base header plus
 * extensions).  Returns a negative errno on error.
 */
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	int payload_ofs;
	struct ipv6hdr *nh;
	uint8_t nexthdr;
	__be16 frag_off;
	int err;

	/* Make sure at least the fixed 40-byte IPv6 header is linear. */
	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;

	/* Default until the extension-header walk succeeds. */
	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	/* Flow label is the low 20 bits of the first 32-bit word. */
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	/* Walk the extension headers; updates nexthdr to the upper-layer
	 * protocol and frag_off from any fragment header encountered.
	 */
	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
	if (unlikely(payload_ofs < 0))
		return -EINVAL;

	if (frag_off) {
		/* Non-zero fragment offset (bits above the low 3) means a
		 * later fragment; offset zero with the fragment header
		 * present means the first fragment.
		 */
		if (frag_off & htons(~0x7))
			key->ip.frag = OVS_FRAG_TYPE_LATER;
		else
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
	}

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}
260 
261 static bool icmp6hdr_ok(struct sk_buff *skb)
262 {
263 	return pskb_may_pull(skb, skb_transport_offset(skb) +
264 				  sizeof(struct icmp6hdr));
265 }
266 
267 static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
268 {
269 	struct qtag_prefix {
270 		__be16 eth_type; /* ETH_P_8021Q */
271 		__be16 tci;
272 	};
273 	struct qtag_prefix *qp;
274 
275 	if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
276 		return 0;
277 
278 	if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
279 					 sizeof(__be16))))
280 		return -ENOMEM;
281 
282 	qp = (struct qtag_prefix *) skb->data;
283 	key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
284 	__skb_pull(skb, sizeof(struct qtag_prefix));
285 
286 	return 0;
287 }
288 
/* Determine the frame's effective ethertype, consuming (pulling) the
 * 2-byte type/length field and, for SNAP frames, the LLC/SNAP header.
 *
 * Returns:
 *   - the ethertype, for Ethernet II or SNAP-encapsulated frames;
 *   - htons(ETH_P_802_2) for other 802.2/802.3 frames;
 *   - htons(0) if pulling the LLC/SNAP header failed (caller treats
 *     this as -ENOMEM).
 */
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	/* Values >= ETH_P_802_3_MIN are true ethertypes (Ethernet II). */
	if (ntohs(proto) >= ETH_P_802_3_MIN)
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	/* llc stays valid after the pull: __skb_pull() only advances
	 * skb->data, it does not free or move the buffer.
	 */
	__skb_pull(skb, sizeof(struct llc_snap_hdr));

	if (ntohs(llc->ethertype) >= ETH_P_802_3_MIN)
		return llc->ethertype;

	return htons(ETH_P_802_2);
}
326 
/* Parse an ICMPv6 header into @key, including IPv6 Neighbor Discovery
 * (NS/NA) target address and link-layer address options.
 *
 * Returns 0 on success or if the ND payload is malformed (the ND key
 * fields are then zeroed), and -ENOMEM if linearizing the skb failed.
 * @nh_len is the IPv6 network-header length (currently unused here).
 */
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->ipv6.tp.src = htons(icmp->icmp6_type);
	key->ipv6.tp.dst = htons(icmp->icmp6_code);

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			return 0;

		if (unlikely(skb_linearize(skb)))
			return -ENOMEM;

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;

		/* Walk the ND options; each is a multiple of 8 bytes. */
		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				 (struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			/* Zero-length or over-long option: stop parsing but
			 * keep what we have (not an error).
			 */
			if (unlikely(!opt_len || opt_len > icmp_len))
				return 0;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				memcpy(key->ipv6.nd.sll,
				    &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				memcpy(key->ipv6.nd.tll,
				    &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	return 0;

invalid:
	/* Duplicate LL-address option: discard all ND data from the key. */
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

	return 0;
}
399 
400 /**
401  * ovs_flow_extract - extracts a flow key from an Ethernet frame.
402  * @skb: sk_buff that contains the frame, with skb->data pointing to the
403  * Ethernet header
404  * @in_port: port number on which @skb was received.
405  * @key: output flow key
406  *
407  * The caller must ensure that skb->len >= ETH_HLEN.
408  *
409  * Returns 0 if successful, otherwise a negative errno value.
410  *
411  * Initializes @skb header pointers as follows:
412  *
413  *    - skb->mac_header: the Ethernet header.
414  *
415  *    - skb->network_header: just past the Ethernet header, or just past the
416  *      VLAN header, to the first byte of the Ethernet payload.
417  *
418  *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
419  *      on output, then just past the IP header, if one is present and
420  *      of a correct length, otherwise the same as skb->network_header.
421  *      For other key->eth.type values it is left untouched.
422  */
423 int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
424 {
425 	int error;
426 	struct ethhdr *eth;
427 
428 	memset(key, 0, sizeof(*key));
429 
430 	key->phy.priority = skb->priority;
431 	if (OVS_CB(skb)->tun_key)
432 		memcpy(&key->tun_key, OVS_CB(skb)->tun_key, sizeof(key->tun_key));
433 	key->phy.in_port = in_port;
434 	key->phy.skb_mark = skb->mark;
435 
436 	skb_reset_mac_header(skb);
437 
438 	/* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
439 	 * header in the linear data area.
440 	 */
441 	eth = eth_hdr(skb);
442 	memcpy(key->eth.src, eth->h_source, ETH_ALEN);
443 	memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);
444 
445 	__skb_pull(skb, 2 * ETH_ALEN);
446 	/* We are going to push all headers that we pull, so no need to
447 	 * update skb->csum here.
448 	 */
449 
450 	if (vlan_tx_tag_present(skb))
451 		key->eth.tci = htons(skb->vlan_tci);
452 	else if (eth->h_proto == htons(ETH_P_8021Q))
453 		if (unlikely(parse_vlan(skb, key)))
454 			return -ENOMEM;
455 
456 	key->eth.type = parse_ethertype(skb);
457 	if (unlikely(key->eth.type == htons(0)))
458 		return -ENOMEM;
459 
460 	skb_reset_network_header(skb);
461 	__skb_push(skb, skb->data - skb_mac_header(skb));
462 
463 	/* Network layer. */
464 	if (key->eth.type == htons(ETH_P_IP)) {
465 		struct iphdr *nh;
466 		__be16 offset;
467 
468 		error = check_iphdr(skb);
469 		if (unlikely(error)) {
470 			if (error == -EINVAL) {
471 				skb->transport_header = skb->network_header;
472 				error = 0;
473 			}
474 			return error;
475 		}
476 
477 		nh = ip_hdr(skb);
478 		key->ipv4.addr.src = nh->saddr;
479 		key->ipv4.addr.dst = nh->daddr;
480 
481 		key->ip.proto = nh->protocol;
482 		key->ip.tos = nh->tos;
483 		key->ip.ttl = nh->ttl;
484 
485 		offset = nh->frag_off & htons(IP_OFFSET);
486 		if (offset) {
487 			key->ip.frag = OVS_FRAG_TYPE_LATER;
488 			return 0;
489 		}
490 		if (nh->frag_off & htons(IP_MF) ||
491 			 skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
492 			key->ip.frag = OVS_FRAG_TYPE_FIRST;
493 
494 		/* Transport layer. */
495 		if (key->ip.proto == IPPROTO_TCP) {
496 			if (tcphdr_ok(skb)) {
497 				struct tcphdr *tcp = tcp_hdr(skb);
498 				key->ipv4.tp.src = tcp->source;
499 				key->ipv4.tp.dst = tcp->dest;
500 				key->ipv4.tp.flags = TCP_FLAGS_BE16(tcp);
501 			}
502 		} else if (key->ip.proto == IPPROTO_UDP) {
503 			if (udphdr_ok(skb)) {
504 				struct udphdr *udp = udp_hdr(skb);
505 				key->ipv4.tp.src = udp->source;
506 				key->ipv4.tp.dst = udp->dest;
507 			}
508 		} else if (key->ip.proto == IPPROTO_SCTP) {
509 			if (sctphdr_ok(skb)) {
510 				struct sctphdr *sctp = sctp_hdr(skb);
511 				key->ipv4.tp.src = sctp->source;
512 				key->ipv4.tp.dst = sctp->dest;
513 			}
514 		} else if (key->ip.proto == IPPROTO_ICMP) {
515 			if (icmphdr_ok(skb)) {
516 				struct icmphdr *icmp = icmp_hdr(skb);
517 				/* The ICMP type and code fields use the 16-bit
518 				 * transport port fields, so we need to store
519 				 * them in 16-bit network byte order. */
520 				key->ipv4.tp.src = htons(icmp->type);
521 				key->ipv4.tp.dst = htons(icmp->code);
522 			}
523 		}
524 
525 	} else if ((key->eth.type == htons(ETH_P_ARP) ||
526 		   key->eth.type == htons(ETH_P_RARP)) && arphdr_ok(skb)) {
527 		struct arp_eth_header *arp;
528 
529 		arp = (struct arp_eth_header *)skb_network_header(skb);
530 
531 		if (arp->ar_hrd == htons(ARPHRD_ETHER)
532 				&& arp->ar_pro == htons(ETH_P_IP)
533 				&& arp->ar_hln == ETH_ALEN
534 				&& arp->ar_pln == 4) {
535 
536 			/* We only match on the lower 8 bits of the opcode. */
537 			if (ntohs(arp->ar_op) <= 0xff)
538 				key->ip.proto = ntohs(arp->ar_op);
539 			memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
540 			memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
541 			memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
542 			memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
543 		}
544 	} else if (key->eth.type == htons(ETH_P_IPV6)) {
545 		int nh_len;             /* IPv6 Header + Extensions */
546 
547 		nh_len = parse_ipv6hdr(skb, key);
548 		if (unlikely(nh_len < 0)) {
549 			if (nh_len == -EINVAL) {
550 				skb->transport_header = skb->network_header;
551 				error = 0;
552 			} else {
553 				error = nh_len;
554 			}
555 			return error;
556 		}
557 
558 		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
559 			return 0;
560 		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
561 			key->ip.frag = OVS_FRAG_TYPE_FIRST;
562 
563 		/* Transport layer. */
564 		if (key->ip.proto == NEXTHDR_TCP) {
565 			if (tcphdr_ok(skb)) {
566 				struct tcphdr *tcp = tcp_hdr(skb);
567 				key->ipv6.tp.src = tcp->source;
568 				key->ipv6.tp.dst = tcp->dest;
569 				key->ipv6.tp.flags = TCP_FLAGS_BE16(tcp);
570 			}
571 		} else if (key->ip.proto == NEXTHDR_UDP) {
572 			if (udphdr_ok(skb)) {
573 				struct udphdr *udp = udp_hdr(skb);
574 				key->ipv6.tp.src = udp->source;
575 				key->ipv6.tp.dst = udp->dest;
576 			}
577 		} else if (key->ip.proto == NEXTHDR_SCTP) {
578 			if (sctphdr_ok(skb)) {
579 				struct sctphdr *sctp = sctp_hdr(skb);
580 				key->ipv6.tp.src = sctp->source;
581 				key->ipv6.tp.dst = sctp->dest;
582 			}
583 		} else if (key->ip.proto == NEXTHDR_ICMP) {
584 			if (icmp6hdr_ok(skb)) {
585 				error = parse_icmpv6(skb, key, nh_len);
586 				if (error)
587 					return error;
588 			}
589 		}
590 	}
591 
592 	return 0;
593 }
594