xref: /openbmc/linux/net/sched/cls_flow.c (revision 62975d27)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flow.c		Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 */

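/*
 * Illustrative userspace usage (tc(8) syntax, quoted from memory and
 * shown here for orientation only; not part of the original file):
 *
 *   # hash mode: spread flows across 1024 classes, re-seed every 10s
 *   tc filter add dev eth0 parent 1: protocol ip prio 1 flow \
 *           hash keys src,dst,proto,proto-src,proto-dst \
 *           divisor 1024 perturb 10
 *
 *   # map mode: derive the class directly from a single key
 *   tc filter add dev eth0 parent 1: protocol ip prio 1 flow \
 *           map key dst and 0xff baseclass 1:1
 *
 * Each option corresponds to one of the TCA_FLOW_* netlink attributes
 * parsed by flow_change() below.
 */
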
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_sock.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/flow_dissector.h>

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif

struct flow_head {
	struct list_head	filters;
	struct rcu_head		rcu;
};

struct flow_filter {
	struct list_head	list;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct tcf_proto	*tp;
	struct timer_list	perturb_timer;
	u32			perturb_period;
	u32			handle;

	u32			nkeys;
	u32			keymask;
	u32			mode;
	u32			mask;
	u32			xor;
	u32			rshift;
	u32			addend;
	u32			divisor;
	u32			baseclass;
	u32			hashrnd;
	struct rcu_work		rwork;
};

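/* Fold a kernel pointer down to 32 bits so it can serve as a flow key;
 * on 64-bit the upper half is XORed in to preserve its entropy.
 */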
static inline u32 addr_fold(void *addr)
{
	unsigned long a = (unsigned long)addr;

	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}

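/* Per-key extractors.  Each returns the 32-bit value of one FLOW_KEY_*
 * for this packet.  Where the dissected value is unavailable they fall
 * back to folding the socket or dst_entry pointer, so the result still
 * varies per flow instead of collapsing to a constant.
 */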
static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 src = flow_get_u32_src(flow);

	if (src)
		return ntohl(src);

	return addr_fold(skb->sk);
}

static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 dst = flow_get_u32_dst(flow);

	if (dst)
		return ntohl(dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
}

static u32 flow_get_proto(const struct sk_buff *skb,
			  const struct flow_keys *flow)
{
	return flow->basic.ip_proto;
}

static u32 flow_get_proto_src(const struct sk_buff *skb,
			      const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.src);

	return addr_fold(skb->sk);
}

static u32 flow_get_proto_dst(const struct sk_buff *skb,
			      const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
}

static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->skb_iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return addr_fold(skb_nfct(skb));
#else
	return 0;
#endif
}

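/* Pull one member out of the conntrack tuple for this packet's
 * direction.  If there is no conntrack entry, or conntrack is compiled
 * out, jump to the caller's local "fallback" label, which reuses the
 * corresponding non-conntrack extractor above.
 */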
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);		\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif

static u32 flow_get_nfct_src(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb, flow);
}

static u32 flow_get_nfct_dst(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb, flow);
}

static u32 flow_get_nfct_proto_src(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb, flow);
}

static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb, flow);
}

static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}

static u32 flow_get_skuid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kuid_t skuid = sk->sk_socket->file->f_cred->fsuid;

		return from_kuid(&init_user_ns, skuid);
	}
	return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kgid_t skgid = sk->sk_socket->file->f_cred->fsgid;

		return from_kgid(&init_user_ns, skgid);
	}
	return 0;
}

static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 tag;

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}

static u32 flow_get_rxhash(struct sk_buff *skb)
{
	return skb_get_hash(skb);
}

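/* Dispatch one FLOW_KEY_* id to the matching extractor above. */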
static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb, flow);
	case FLOW_KEY_DST:
		return flow_get_dst(skb, flow);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb, flow);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb, flow);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb, flow);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb, flow);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb, flow);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb, flow);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb, flow);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	case FLOW_KEY_RXHASH:
		return flow_get_rxhash(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}

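/* Keys whose extraction requires a prior flow-dissector pass. */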
#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) |		\
			  (1 << FLOW_KEY_DST) |			\
			  (1 << FLOW_KEY_PROTO) |		\
			  (1 << FLOW_KEY_PROTO_SRC) |		\
			  (1 << FLOW_KEY_PROTO_DST) |		\
			  (1 << FLOW_KEY_NFCT_SRC) |		\
			  (1 << FLOW_KEY_NFCT_DST) |		\
			  (1 << FLOW_KEY_NFCT_PROTO_SRC) |	\
			  (1 << FLOW_KEY_NFCT_PROTO_DST))

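/* Classify one packet.  For each filter whose ematch tree matches,
 * gather the configured keys and either jhash2() them (hash mode) or
 * run the single key through the mask/xor/rshift/addend pipeline (map
 * mode); the result, reduced modulo the divisor if one is set, becomes
 * a classid relative to ->baseclass.  The first filter whose actions
 * accept the packet ends the walk.
 */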
static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct flow_head *head = rcu_dereference_bh(tp->root);
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry_rcu(f, &head->filters, list) {
		u32 keys[FLOW_KEY_MAX + 1];
		struct flow_keys flow_keys;

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;
		if (keymask & FLOW_KEYS_NEEDED)
			skb_flow_dissect_flow_keys(skb, &flow_keys, 0);

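		/* Walk the set bits of keymask from lowest to highest;
		 * flow_change() guarantees nkeys == hweight32(keymask).
		 */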
		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key, &flow_keys);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class   = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}

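/* Timer callback: pick a fresh jhash seed so hash-mode flows migrate
 * between classes over time, then re-arm while perturbation stays on.
 */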
static void flow_perturbation(struct timer_list *t)
{
	struct flow_filter *f = from_timer(f, t, perturb_timer);

	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}

static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};

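/* Release everything a filter owns.  Reached via RCU work
 * (flow_destroy_filter_work) once readers are done with the filter, or
 * called directly from flow_destroy() when the netns is already being
 * dismantled and tcf_exts_get_net() fails.
 */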
static void __flow_destroy_filter(struct flow_filter *f)
{
	del_timer_sync(&f->perturb_timer);
	tcf_exts_destroy(&f->exts);
	tcf_em_tree_destroy(&f->ematches);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void flow_destroy_filter_work(struct work_struct *work)
{
	struct flow_filter *f = container_of(to_rcu_work(work),
					     struct flow_filter,
					     rwork);
	rtnl_lock();
	__flow_destroy_filter(f);
	rtnl_unlock();
}

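/* Create a new filter or replace an existing one.  A replacement is
 * built from a copy of the old filter, so attributes absent from the
 * request keep their previous values; the old filter is unlinked with
 * RCU and freed from a work item once readers have moved on.
 */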
static int flow_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, bool ovr, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *fold, *fnew;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_FLOW_MAX, opt, flow_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;

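		/* The sk-uid/sk-gid keys expose credential information,
		 * so accept them only from the initial user namespace.
		 * FLOW_KEY_* are bit positions, hence the 1 << shifts to
		 * match the keymask convention used throughout this file.
		 */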
		if ((keymask & ((1 << FLOW_KEY_SKUID) |
				(1 << FLOW_KEY_SKGID))) &&
		    sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns)
			return -EOPNOTSUPP;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
	if (err < 0)
		goto err1;

	err = tcf_exts_init(&fnew->exts, net, TCA_FLOW_ACT, TCA_FLOW_POLICE);
	if (err < 0)
		goto err2;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr,
				true, extack);
	if (err < 0)
		goto err2;

	fold = *arg;
	if (fold) {
		err = -EINVAL;
		if (fold->handle != handle && handle)
			goto err2;

		/* Copy fold into fnew */
		fnew->tp = fold->tp;
		fnew->handle = fold->handle;
		fnew->nkeys = fold->nkeys;
		fnew->keymask = fold->keymask;
		fnew->mode = fold->mode;
		fnew->mask = fold->mask;
		fnew->xor = fold->xor;
		fnew->rshift = fold->rshift;
		fnew->addend = fold->addend;
		fnew->divisor = fold->divisor;
		fnew->baseclass = fold->baseclass;
		fnew->hashrnd = fold->hashrnd;

		mode = fold->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
			perturb_period = fold->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		if (TC_H_MAJ(baseclass) == 0) {
			struct Qdisc *q = tcf_block_q(tp->chain->block);

			baseclass = TC_H_MAKE(q->handle, baseclass);
		}
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		fnew->handle = handle;
		fnew->mask  = ~0U;
		fnew->tp = tp;
		get_random_bytes(&fnew->hashrnd, 4);
	}

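	/* TIMER_DEFERRABLE: re-seeding the hash is not urgent, so the
	 * perturbation timer may slip until the next non-deferrable
	 * wakeup instead of waking an idle CPU.
	 */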
	timer_setup(&fnew->perturb_timer, flow_perturbation, TIMER_DEFERRABLE);

	tcf_block_netif_keep_dst(tp->chain->block);

	if (tb[TCA_FLOW_KEYS]) {
		fnew->keymask = keymask;
		fnew->nkeys   = nkeys;
	}

	fnew->mode = mode;

	if (tb[TCA_FLOW_MASK])
		fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		fnew->baseclass = baseclass;

	fnew->perturb_period = perturb_period;
	if (perturb_period)
		mod_timer(&fnew->perturb_timer, jiffies + perturb_period);

	if (!*arg)
		list_add_tail_rcu(&fnew->list, &head->filters);
	else
		list_replace_rcu(&fold->list, &fnew->list);

	*arg = fnew;

	if (fold) {
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, flow_destroy_filter_work);
	}
	return 0;

err2:
	tcf_exts_destroy(&fnew->exts);
	tcf_em_tree_destroy(&fnew->ematches);
err1:
	kfree(fnew);
	return err;
}

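/* Unlink a filter; actual teardown is deferred to RCU work so that
 * in-flight flow_classify() calls can finish with the old entry.
 */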
static int flow_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f = arg;

	list_del_rcu(&f->list);
	tcf_exts_get_net(&f->exts);
	tcf_queue_work(&f->rwork, flow_destroy_filter_work);
	*last = list_empty(&head->filters);
	return 0;
}

static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	rcu_assign_pointer(tp->root, head);
	return 0;
}

static void flow_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		if (tcf_exts_get_net(&f->exts))
			tcf_queue_work(&f->rwork, flow_destroy_filter_work);
		else
			__flow_destroy_filter(f);
	}
	kfree_rcu(head, rcu);
}

static void *flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return f;
	return NULL;
}

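/* Dump a filter's configuration back to userspace; most attributes are
 * omitted while they still hold their default value.
 */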
static int flow_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct flow_filter *f = fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
	    nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
		goto nla_put_failure;

	if (f->mask != ~0 || f->xor != 0) {
		if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
		    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
			goto nla_put_failure;
	}
	if (f->rshift &&
	    nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
		goto nla_put_failure;
	if (f->addend &&
	    nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
		goto nla_put_failure;

	if (f->divisor &&
	    nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
		goto nla_put_failure;
	if (f->baseclass &&
	    nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
		goto nla_put_failure;

	if (f->perturb_period &&
	    nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};

static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");