// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>

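/* Private data of an rbtree set: the tree root, an rwlock serializing
 * writers against locked readers, a seqcount letting lockless readers
 * detect concurrent updates, and the delayed work used to garbage
 * collect timed-out elements.
 */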
struct nft_rbtree {
	struct rb_root		root;
	rwlock_t		lock;
	seqcount_rwlock_t	count;
	struct delayed_work	gc_work;
};

struct nft_rbtree_elem {
	struct rb_node		node;
	struct nft_set_ext	ext;
};

static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
{
	return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
	       (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
}

static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
{
	return !nft_rbtree_interval_end(rbe);
}

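/* Compare two element keys as raw byte strings of set->klen bytes. */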
static int nft_rbtree_cmp(const struct nft_set *set,
			  const struct nft_rbtree_elem *e1,
			  const struct nft_rbtree_elem *e2)
{
	return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
		      set->klen);
}

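/* Lockless lookup: descend the tree under RCU, remembering the closest
 * interval start element seen so far. Returns false on no match or when
 * the seqcount shows a concurrent update; the caller then decides whether
 * to retry under the read lock.
 */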
static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
				const u32 *key, const struct nft_set_ext **ext,
				unsigned int seq)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct nft_rbtree_elem *rbe, *interval = NULL;
	u8 genmask = nft_genmask_cur(net);
	const struct rb_node *parent;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (interval &&
			    !nft_rbtree_cmp(set, rbe, interval) &&
			    nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(interval))
				continue;
			interval = rbe;
		} else if (d > 0)
			parent = rcu_dereference_raw(parent->rb_right);
		else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_set_elem_expired(&rbe->ext))
				return false;

			if (nft_rbtree_interval_end(rbe)) {
				if (nft_set_is_anonymous(set))
					return false;
				parent = rcu_dereference_raw(parent->rb_left);
				interval = NULL;
				continue;
			}

			*ext = &rbe->ext;
			return true;
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_set_elem_expired(&interval->ext) &&
	    nft_rbtree_interval_start(interval)) {
		*ext = &interval->ext;
		return true;
	}

	return false;
}

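/* Fast path: try a lockless lookup first; if it found nothing and the
 * seqcount shows that writers interfered, repeat the lookup under the
 * read lock.
 */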
INDIRECT_CALLABLE_SCOPE
bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
		       const u32 *key, const struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	bool ret;

	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return ret;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	read_unlock_bh(&priv->lock);

	return ret;
}

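/* Locate the element matching @key for a get request, using the
 * NFT_SET_ELEM_INTERVAL_END bit in @flags to distinguish the start from
 * the end element of an interval. Same seqcount convention as
 * __nft_rbtree_lookup().
 */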
static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
			     const u32 *key, struct nft_rbtree_elem **elem,
			     unsigned int seq, unsigned int flags, u8 genmask)
{
	struct nft_rbtree_elem *rbe, *interval = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (!(flags & NFT_SET_ELEM_INTERVAL_END))
				interval = rbe;
		} else if (d > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			if (flags & NFT_SET_ELEM_INTERVAL_END)
				interval = rbe;
		} else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_set_elem_expired(&rbe->ext))
				return false;

			if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
			    (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
			    (flags & NFT_SET_ELEM_INTERVAL_END)) {
				*elem = rbe;
				return true;
			}

			if (nft_rbtree_interval_end(rbe))
				interval = NULL;

			parent = rcu_dereference_raw(parent->rb_left);
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_set_elem_expired(&interval->ext) &&
	    ((!nft_rbtree_interval_end(interval) &&
	      !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
	     (nft_rbtree_interval_end(interval) &&
	      (flags & NFT_SET_ELEM_INTERVAL_END)))) {
		*elem = interval;
		return true;
	}

	return false;
}

static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
			    const struct nft_set_elem *elem, unsigned int flags)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
	const u32 *key = (const u32 *)&elem->key.val;
	u8 genmask = nft_genmask_cur(net);
	bool ret;

	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return rbe;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (!ret)
		rbe = ERR_PTR(-ENOENT);
	read_unlock_bh(&priv->lock);

	return rbe;
}

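/* Synchronous GC from the insert path: erase the expired element and the
 * preceding end element (if any) from the tree, and queue the expired
 * element for release via the GC batch after a grace period.
 */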
static int nft_rbtree_gc_elem(const struct nft_set *__set,
			      struct nft_rbtree *priv,
			      struct nft_rbtree_elem *rbe)
{
	struct nft_set *set = (struct nft_set *)__set;
	struct rb_node *prev = rb_prev(&rbe->node);
	struct nft_rbtree_elem *rbe_prev = NULL;
	struct nft_set_gc_batch *gcb;

	gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
	if (!gcb)
		return -ENOMEM;

	/* search for expired end interval coming before this element. */
	while (prev) {
		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
		if (nft_rbtree_interval_end(rbe_prev))
			break;

		prev = rb_prev(prev);
	}

	if (rbe_prev) {
		rb_erase(&rbe_prev->node, &priv->root);
		atomic_dec(&set->nelems);
	}

	rb_erase(&rbe->node, &priv->root);
	atomic_dec(&set->nelems);

	nft_set_gc_batch_add(gcb, rbe);
	nft_set_gc_batch_complete(gcb);

	return 0;
}

static bool nft_rbtree_update_first(const struct nft_set *set,
				    struct nft_rbtree_elem *rbe,
				    struct rb_node *first)
{
	struct nft_rbtree_elem *first_elem;

	first_elem = rb_entry(first, struct nft_rbtree_elem, node);
	/* this element is closest to where the new element is to be inserted:
	 * update the first element for the node list path.
	 */
	if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
		return true;

	return false;
}

static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			       struct nft_rbtree_elem *new,
			       struct nft_set_ext **ext)
{
	struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
	struct rb_node *node, *next, *parent, **p, *first = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	u8 genmask = nft_genmask_next(net);
	int d, err;

	/* Descend the tree to search for an existing element greater than the
	 * key value to insert. This is the first element from which to walk
	 * the ordered elements when looking for a possible overlap.
	 */
	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = nft_rbtree_cmp(set, rbe, new);

		if (d < 0) {
			p = &parent->rb_left;
		} else if (d > 0) {
			if (!first ||
			    nft_rbtree_update_first(set, rbe, first))
				first = &rbe->node;

			p = &parent->rb_right;
		} else {
			if (nft_rbtree_interval_end(rbe))
				p = &parent->rb_left;
			else
				p = &parent->rb_right;
		}
	}

	if (!first)
		first = rb_first(&priv->root);

	/* Detect overlap by going through the list of valid tree nodes.
	 * Values stored in the tree are in reversed order, starting from
	 * highest to lowest value.
	 */
	for (node = first; node != NULL; node = next) {
		next = rb_next(node);

		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (!nft_set_elem_active(&rbe->ext, genmask))
			continue;

		/* perform garbage collection to avoid bogus overlap reports. */
		if (nft_set_elem_expired(&rbe->ext)) {
			err = nft_rbtree_gc_elem(set, priv, rbe);
			if (err < 0)
				return err;

			continue;
		}

		d = nft_rbtree_cmp(set, rbe, new);
		if (d == 0) {
			/* Matching end element: no need to look for an
			 * overlapping greater or equal element.
			 */
			if (nft_rbtree_interval_end(rbe)) {
				rbe_le = rbe;
				break;
			}

			/* first element that is greater or equal to key value. */
			if (!rbe_ge) {
				rbe_ge = rbe;
				continue;
			}

			/* this is a closer, greater or equal element, update it. */
			if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
				rbe_ge = rbe;
				continue;
			}

			/* element is equal to the key value, make sure the flags
			 * are the same: an existing greater or equal start element
			 * must not be replaced by a greater or equal end element.
			 */
			if ((nft_rbtree_interval_start(new) &&
			     nft_rbtree_interval_start(rbe_ge)) ||
			    (nft_rbtree_interval_end(new) &&
			     nft_rbtree_interval_end(rbe_ge))) {
				rbe_ge = rbe;
				continue;
			}
		} else if (d > 0) {
			/* annotate element greater than the new element. */
			rbe_ge = rbe;
			continue;
		} else if (d < 0) {
			/* annotate element less than the new element. */
			rbe_le = rbe;
			break;
		}
	}

	/* - new start element matching existing start element: full overlap
	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
	 */
	if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
	    nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
		*ext = &rbe_ge->ext;
		return -EEXIST;
	}

	/* - new end element matching existing end element: full overlap
	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
	 */
	if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
	    nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
		*ext = &rbe_le->ext;
		return -EEXIST;
	}

	/* - new start element with existing closest, less or equal key value
	 *   being a start element: partial overlap, reported as -ENOTEMPTY.
	 *   Anonymous sets allow for two consecutive start elements since they
	 *   are constant, skip them to avoid bogus overlap reports.
	 */
	if (!nft_set_is_anonymous(set) && rbe_le &&
	    nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
		return -ENOTEMPTY;

	/* - new end element with existing closest, less or equal key value
	 *   being an end element: partial overlap, reported as -ENOTEMPTY.
	 */
	if (rbe_le &&
	    nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
		return -ENOTEMPTY;

	/* - new end element with existing closest, greater or equal key value
	 *   being an end element: partial overlap, reported as -ENOTEMPTY
	 */
	if (rbe_ge &&
	    nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
		return -ENOTEMPTY;

	/* Accepted element: pick insertion point depending on key value */
	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = nft_rbtree_cmp(set, rbe, new);

		if (d < 0)
			p = &parent->rb_left;
		else if (d > 0)
			p = &parent->rb_right;
		else if (nft_rbtree_interval_end(rbe))
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	rb_link_node_rcu(&new->node, parent, p);
	rb_insert_color(&new->node, &priv->root);
	return 0;
}

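/* Insert under the write lock and inside a seqcount write section so that
 * lockless readers notice the update and retry.
 */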
static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			     const struct nft_set_elem *elem,
			     struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;
	int err;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	err = __nft_rbtree_insert(net, set, rbe, ext);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	return err;
}

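/* Unlink an element from the tree; the element itself is released later
 * by the nf_tables core once no readers can still see it.
 */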
static void nft_rbtree_remove(const struct net *net,
			      const struct nft_set *set,
			      const struct nft_set_elem *elem)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	rb_erase(&rbe->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);
}

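/* Commit path: flip the element's generation bit so it becomes active and
 * clear the busy mark set by the transaction.
 */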
static void nft_rbtree_activate(const struct net *net,
				const struct nft_set *set,
				const struct nft_set_elem *elem)
{
	struct nft_rbtree_elem *rbe = elem->priv;

	nft_set_elem_change_active(net, set, &rbe->ext);
	nft_set_elem_clear_busy(&rbe->ext);
}

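/* Try to deactivate an element on flush; this fails if another transaction
 * already marked the element busy while it is still active.
 */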
static bool nft_rbtree_flush(const struct net *net,
			     const struct nft_set *set, void *priv)
{
	struct nft_rbtree_elem *rbe = priv;

	if (!nft_set_elem_mark_busy(&rbe->ext) ||
	    !nft_is_active(net, &rbe->ext)) {
		nft_set_elem_change_active(net, set, &rbe->ext);
		return true;
	}
	return false;
}

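/* Find the tree element corresponding to @elem, distinguishing the start
 * and end elements of an interval, and deactivate it for the next
 * generation.
 */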
static void *nft_rbtree_deactivate(const struct net *net,
				   const struct nft_set *set,
				   const struct nft_set_elem *elem)
{
	const struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent = priv->root.rb_node;
	struct nft_rbtree_elem *rbe, *this = elem->priv;
	u8 genmask = nft_genmask_next(net);
	int d;

	while (parent != NULL) {
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
			   set->klen);
		if (d < 0)
			parent = parent->rb_left;
		else if (d > 0)
			parent = parent->rb_right;
		else {
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(this)) {
				parent = parent->rb_left;
				continue;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(this)) {
				parent = parent->rb_right;
				continue;
			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = parent->rb_left;
				continue;
			}
			nft_rbtree_flush(net, set, rbe);
			return rbe;
		}
	}
	return NULL;
}

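/* Walk all active, non-expired elements under the read lock, invoking the
 * iterator callback for each of them.
 */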
static void nft_rbtree_walk(const struct nft_ctx *ctx,
			    struct nft_set *set,
			    struct nft_set_iter *iter)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct nft_set_elem elem;
	struct rb_node *node;

	read_lock_bh(&priv->lock);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (iter->count < iter->skip)
			goto cont;
		if (nft_set_elem_expired(&rbe->ext))
			goto cont;
		if (!nft_set_elem_active(&rbe->ext, iter->genmask))
			goto cont;

		elem.priv = rbe;

		iter->err = iter->fn(ctx, set, iter, &elem);
		if (iter->err < 0) {
			read_unlock_bh(&priv->lock);
			return;
		}
cont:
		iter->count++;
	}
	read_unlock_bh(&priv->lock);
}

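/* Deferred garbage collection: scan the tree for expired elements, unlink
 * them together with the end element of their interval, release them via a
 * GC batch after a grace period, then re-arm the work.
 */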
static void nft_rbtree_gc(struct work_struct *work)
{
	struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
	struct nft_set_gc_batch *gcb = NULL;
	struct nft_rbtree *priv;
	struct rb_node *node;
	struct nft_set *set;
	struct net *net;
	u8 genmask;

	priv = container_of(work, struct nft_rbtree, gc_work.work);
	set  = nft_set_container_of(priv);
	net  = read_pnet(&set->net);
	genmask = nft_genmask_cur(net);

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (!nft_set_elem_active(&rbe->ext, genmask))
			continue;

		/* elements are reversed in the rbtree for historical reasons,
		 * from highest to lowest value, that is why the end element is
		 * always visited before the start element.
		 */
		if (nft_rbtree_interval_end(rbe)) {
			rbe_end = rbe;
			continue;
		}
		if (!nft_set_elem_expired(&rbe->ext))
			continue;

		if (nft_set_elem_mark_busy(&rbe->ext)) {
			rbe_end = NULL;
			continue;
		}

		if (rbe_prev) {
			rb_erase(&rbe_prev->node, &priv->root);
			rbe_prev = NULL;
		}
		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
		if (!gcb)
			break;

		atomic_dec(&set->nelems);
		nft_set_gc_batch_add(gcb, rbe);
		rbe_prev = rbe;

		if (rbe_end) {
			atomic_dec(&set->nelems);
			nft_set_gc_batch_add(gcb, rbe_end);
			rb_erase(&rbe_end->node, &priv->root);
			rbe_end = NULL;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
	if (rbe_prev)
		rb_erase(&rbe_prev->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	rbe = nft_set_catchall_gc(set);
	if (rbe) {
		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
		if (gcb)
			nft_set_gc_batch_add(gcb, rbe);
	}
	nft_set_gc_batch_complete(gcb);

	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
			   nft_set_gc_interval(set));
}

static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
			       const struct nft_set_desc *desc)
{
	return sizeof(struct nft_rbtree);
}

static int nft_rbtree_init(const struct nft_set *set,
			   const struct nft_set_desc *desc,
			   const struct nlattr * const nla[])
{
	struct nft_rbtree *priv = nft_set_priv(set);

	rwlock_init(&priv->lock);
	seqcount_rwlock_init(&priv->count, &priv->lock);
	priv->root = RB_ROOT;

	INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
	if (set->flags & NFT_SET_TIMEOUT)
		queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
				   nft_set_gc_interval(set));

	return 0;
}

static void nft_rbtree_destroy(const struct nft_ctx *ctx,
			       const struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct rb_node *node;

	cancel_delayed_work_sync(&priv->gc_work);
	rcu_barrier();
	while ((node = priv->root.rb_node) != NULL) {
		rb_erase(node, &priv->root);
		rbe = rb_entry(node, struct nft_rbtree_elem, node);
		nf_tables_set_elem_destroy(ctx, set, rbe);
	}
}

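/* Lookups cost O(log n) and memory grows linearly with the number of
 * elements; the rbtree backend only supports single-field keys.
 */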
static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
				struct nft_set_estimate *est)
{
	if (desc->field_count > 1)
		return false;

	if (desc->size)
		est->size = sizeof(struct nft_rbtree) +
			    desc->size * sizeof(struct nft_rbtree_elem);
	else
		est->size = ~0;

	est->lookup = NFT_SET_CLASS_O_LOG_N;
	est->space  = NFT_SET_CLASS_O_N;

	return true;
}

const struct nft_set_type nft_set_rbtree_type = {
	.features	= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
	.ops		= {
		.privsize	= nft_rbtree_privsize,
		.elemsize	= offsetof(struct nft_rbtree_elem, ext),
		.estimate	= nft_rbtree_estimate,
		.init		= nft_rbtree_init,
		.destroy	= nft_rbtree_destroy,
		.insert		= nft_rbtree_insert,
		.remove		= nft_rbtree_remove,
		.deactivate	= nft_rbtree_deactivate,
		.flush		= nft_rbtree_flush,
		.activate	= nft_rbtree_activate,
		.lookup		= nft_rbtree_lookup,
		.walk		= nft_rbtree_walk,
		.get		= nft_rbtree_get,
	},
};