// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>

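/* Per-set private data. Elements live in an rbtree; data path lookups run
 * locklessly and use the seqcount to detect concurrent writers, falling
 * back to the read lock to retry. Expired elements are reaped
 * asynchronously by gc_work.
 */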
struct nft_rbtree {
	struct rb_root		root;
	rwlock_t		lock;
	seqcount_rwlock_t	count;
	struct delayed_work	gc_work;
};

struct nft_rbtree_elem {
	struct rb_node		node;
	struct nft_set_ext	ext;
};

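/* Each element represents one interval endpoint: NFT_SET_ELEM_INTERVAL_END
 * in the extension flags marks an interval end, anything else (including
 * elements without a flags extension) counts as an interval start.
 */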
static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
{
	return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
	       (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
}

static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
{
	return !nft_rbtree_interval_end(rbe);
}

static int nft_rbtree_cmp(const struct nft_set *set,
			  const struct nft_rbtree_elem *e1,
			  const struct nft_rbtree_elem *e2)
{
	return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
		      set->klen);
}

static bool nft_rbtree_elem_expired(const struct nft_rbtree_elem *rbe)
{
	return nft_set_elem_expired(&rbe->ext) ||
	       nft_set_elem_is_dead(&rbe->ext);
}

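/* Tree descent for the data path, called under RCU or, on retry, under the
 * read lock. The seqcount is re-checked on every iteration: if a writer
 * interfered, abort and let the caller retry. For interval sets, @interval
 * tracks the closest element below the key; if the descent ends without an
 * exact match, an active start element there means the key falls within
 * that interval.
 */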
static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
				const u32 *key, const struct nft_set_ext **ext,
				unsigned int seq)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct nft_rbtree_elem *rbe, *interval = NULL;
	u8 genmask = nft_genmask_cur(net);
	const struct rb_node *parent;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (interval &&
			    !nft_rbtree_cmp(set, rbe, interval) &&
			    nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(interval))
				continue;
			interval = rbe;
		} else if (d > 0)
			parent = rcu_dereference_raw(parent->rb_right);
		else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_rbtree_elem_expired(rbe))
				return false;

			if (nft_rbtree_interval_end(rbe)) {
				if (nft_set_is_anonymous(set))
					return false;
				parent = rcu_dereference_raw(parent->rb_left);
				interval = NULL;
				continue;
			}

			*ext = &rbe->ext;
			return true;
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_rbtree_elem_expired(interval) &&
	    nft_rbtree_interval_start(interval)) {
		*ext = &interval->ext;
		return true;
	}

	return false;
}

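/* Data path lookup: try locklessly first; only when the seqcount shows a
 * concurrent writer take the read lock and repeat the descent.
 */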
INDIRECT_CALLABLE_SCOPE
bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
		       const u32 *key, const struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	bool ret;

	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return ret;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	read_unlock_bh(&priv->lock);

	return ret;
}

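/* Control plane counterpart of the lookup descent: find the element for
 * @key whose endpoint type matches NFT_SET_ELEM_INTERVAL_END in @flags.
 */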
static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
			     const u32 *key, struct nft_rbtree_elem **elem,
			     unsigned int seq, unsigned int flags, u8 genmask)
{
	struct nft_rbtree_elem *rbe, *interval = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (!(flags & NFT_SET_ELEM_INTERVAL_END))
				interval = rbe;
		} else if (d > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			if (flags & NFT_SET_ELEM_INTERVAL_END)
				interval = rbe;
		} else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_set_elem_expired(&rbe->ext))
				return false;

			if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
			    (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
			    (flags & NFT_SET_ELEM_INTERVAL_END)) {
				*elem = rbe;
				return true;
			}

			if (nft_rbtree_interval_end(rbe))
				interval = NULL;

			parent = rcu_dereference_raw(parent->rb_left);
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_set_elem_expired(&interval->ext) &&
	    ((!nft_rbtree_interval_end(interval) &&
	      !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
	     (nft_rbtree_interval_end(interval) &&
	      (flags & NFT_SET_ELEM_INTERVAL_END)))) {
		*elem = interval;
		return true;
	}

	return false;
}

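/* .get callback: same lockless-then-locked retry scheme as the lookup
 * path, returning the element on success or ERR_PTR(-ENOENT).
 */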
static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
			    const struct nft_set_elem *elem, unsigned int flags)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
	const u32 *key = (const u32 *)&elem->key.val;
	u8 genmask = nft_genmask_cur(net);
	bool ret;

	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return rbe;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (!ret)
		rbe = ERR_PTR(-ENOENT);
	read_unlock_bh(&priv->lock);

	return rbe;
}

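/* Deactivate the element's data (map/object bindings) and unlink it from
 * the tree; freeing is deferred to the gc transaction that queued it.
 */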
static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
				 struct nft_rbtree *priv,
				 struct nft_rbtree_elem *rbe)
{
	struct nft_set_elem elem = {
		.priv	= rbe,
	};

	nft_setelem_data_deactivate(net, set, &elem);
	rb_erase(&rbe->node, &priv->root);
}

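/* Synchronous gc of one expired element from the insert path. The paired
 * end element, if any, is removed along with it, since end elements carry
 * no timeout extension of their own.
 */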
static int nft_rbtree_gc_elem(const struct nft_set *__set,
			      struct nft_rbtree *priv,
			      struct nft_rbtree_elem *rbe,
			      u8 genmask)
{
	struct nft_set *set = (struct nft_set *)__set;
	struct rb_node *prev = rb_prev(&rbe->node);
	struct net *net = read_pnet(&set->net);
	struct nft_rbtree_elem *rbe_prev;
	struct nft_trans_gc *gc;

	gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC);
	if (!gc)
		return -ENOMEM;

	/* Search for the end element that comes before this element. End
	 * elements don't carry a timeout extension of their own; they are
	 * coupled with their interval start element.
	 */
	while (prev) {
		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
		if (nft_rbtree_interval_end(rbe_prev) &&
		    nft_set_elem_active(&rbe_prev->ext, genmask))
			break;

		prev = rb_prev(prev);
	}

	if (prev) {
		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
		nft_rbtree_gc_remove(net, set, priv, rbe_prev);

		/* The trans gc batch already has room for this element, so
		 * queueing it never actually allocates memory; a failure here
		 * would indicate a bug, hence the warning splat. There is no
		 * need to set NFT_SET_ELEM_DEAD_BIT: this is synchronous gc,
		 * which never fails.
		 */
		gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
		if (WARN_ON_ONCE(!gc))
			return -ENOMEM;

		nft_trans_gc_elem_add(gc, rbe_prev);
	}

	nft_rbtree_gc_remove(net, set, priv, rbe);
	gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
	if (WARN_ON_ONCE(!gc))
		return -ENOMEM;

	nft_trans_gc_elem_add(gc, rbe);

	nft_trans_gc_queue_sync_done(gc);

	return 0;
}

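/* Return true if @rbe is closer than @first to the insertion point of the
 * new element, i.e. @rbe should become the new start of the overlap walk.
 */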
static bool nft_rbtree_update_first(const struct nft_set *set,
				    struct nft_rbtree_elem *rbe,
				    struct rb_node *first)
{
	struct nft_rbtree_elem *first_elem;

	first_elem = rb_entry(first, struct nft_rbtree_elem, node);
	/* this element is closer to where the new element is to be inserted:
	 * make it the new starting point for the overlap walk.
	 */
	if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
		return true;

	return false;
}

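/* Insert with overlap detection: a new endpoint identical to an existing
 * one yields -EEXIST (cleared by the caller unless NLM_F_EXCL was given),
 * a partial overlap with an existing interval yields -ENOTEMPTY.
 */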
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			       struct nft_rbtree_elem *new,
			       struct nft_set_ext **ext)
{
	struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
	struct rb_node *node, *next, *parent, **p, *first = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	u8 cur_genmask = nft_genmask_cur(net);
	u8 genmask = nft_genmask_next(net);
	int d, err;

	/* Descend the tree to find the existing element with the smallest key
	 * that is still greater than the key of the new element. This is the
	 * first element for walking the ordered elements to find a possible
	 * overlap.
	 */
	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = nft_rbtree_cmp(set, rbe, new);

		if (d < 0) {
			p = &parent->rb_left;
		} else if (d > 0) {
			if (!first ||
			    nft_rbtree_update_first(set, rbe, first))
				first = &rbe->node;

			p = &parent->rb_right;
		} else {
			if (nft_rbtree_interval_end(rbe))
				p = &parent->rb_left;
			else
				p = &parent->rb_right;
		}
	}

	if (!first)
		first = rb_first(&priv->root);

	/* Detect overlaps by walking the list of valid tree nodes. Values are
	 * stored in the tree in reversed order, from highest to lowest.
	 */
	for (node = first; node != NULL; node = next) {
		next = rb_next(node);

		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (!nft_set_elem_active(&rbe->ext, genmask))
			continue;

		/* perform garbage collection to avoid bogus overlap reports
		 * but skip new elements in this transaction.
		 */
		if (nft_set_elem_expired(&rbe->ext) &&
		    nft_set_elem_active(&rbe->ext, cur_genmask)) {
			err = nft_rbtree_gc_elem(set, priv, rbe, genmask);
			if (err < 0)
				return err;

			continue;
		}

		d = nft_rbtree_cmp(set, rbe, new);
		if (d == 0) {
			/* Matching end element: no need to look for an
			 * overlapping greater or equal element.
			 */
			if (nft_rbtree_interval_end(rbe)) {
				rbe_le = rbe;
				break;
			}

			/* first element that is greater or equal to the key
			 * value.
			 */
			if (!rbe_ge) {
				rbe_ge = rbe;
				continue;
			}

			/* the annotated element is strictly greater than the
			 * key value: this exact match is closer, update it.
			 */
			if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
				rbe_ge = rbe;
				continue;
			}

			/* element is equal to the key value: make sure the
			 * flags are the same, an existing greater-or-equal
			 * start element must not be replaced by a
			 * greater-or-equal end element.
			 */
			if ((nft_rbtree_interval_start(new) &&
			     nft_rbtree_interval_start(rbe_ge)) ||
			    (nft_rbtree_interval_end(new) &&
			     nft_rbtree_interval_end(rbe_ge))) {
				rbe_ge = rbe;
				continue;
			}
		} else if (d > 0) {
			/* annotate element greater than the new element. */
			rbe_ge = rbe;
			continue;
		} else if (d < 0) {
			/* annotate element less than the new element. */
			rbe_le = rbe;
			break;
		}
	}

	/* - new start element matching existing start element: full overlap
	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
	 */
	if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
	    nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
		*ext = &rbe_ge->ext;
		return -EEXIST;
	}

	/* - new end element matching existing end element: full overlap
	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
	 */
	if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
	    nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
		*ext = &rbe_le->ext;
		return -EEXIST;
	}

	/* - new start element with existing closest, less or equal key value
	 *   being a start element: partial overlap, reported as -ENOTEMPTY.
	 *   Anonymous sets allow for two consecutive start elements since they
	 *   are constant, skip them to avoid bogus overlap reports.
	 */
	if (!nft_set_is_anonymous(set) && rbe_le &&
	    nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
		return -ENOTEMPTY;

	/* - new end element with existing closest, less or equal key value
	 *   being an end element: partial overlap, reported as -ENOTEMPTY.
	 */
	if (rbe_le &&
	    nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
		return -ENOTEMPTY;

	/* - new end element with existing closest, greater or equal key value
	 *   being an end element: partial overlap, reported as -ENOTEMPTY.
	 */
	if (rbe_ge &&
	    nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
		return -ENOTEMPTY;

	/* Accepted element: pick insertion point depending on key value. */
	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = nft_rbtree_cmp(set, rbe, new);

		if (d < 0)
			p = &parent->rb_left;
		else if (d > 0)
			p = &parent->rb_right;
		else if (nft_rbtree_interval_end(rbe))
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	rb_link_node_rcu(&new->node, parent, p);
	rb_insert_color(&new->node, &priv->root);
	return 0;
}

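/* Writers serialize on the rwlock; the seqcount write section makes
 * concurrent lockless readers fall back to the locked slow path.
 */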
static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			     const struct nft_set_elem *elem,
			     struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;
	int err;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	err = __nft_rbtree_insert(net, set, rbe, ext);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	return err;
}

static void nft_rbtree_remove(const struct net *net,
			      const struct nft_set *set,
			      const struct nft_set_elem *elem)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	rb_erase(&rbe->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);
}

static void nft_rbtree_activate(const struct net *net,
				const struct nft_set *set,
				const struct nft_set_elem *elem)
{
	struct nft_rbtree_elem *rbe = elem->priv;

	nft_set_elem_change_active(net, set, &rbe->ext);
}

static bool nft_rbtree_flush(const struct net *net,
			     const struct nft_set *set, void *priv)
{
	struct nft_rbtree_elem *rbe = priv;

	nft_set_elem_change_active(net, set, &rbe->ext);

	return true;
}

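/* Find the element matching @elem's key and endpoint type, clear its
 * active bit for the next generation and return it, or NULL if no such
 * element exists.
 */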
static void *nft_rbtree_deactivate(const struct net *net,
				   const struct nft_set *set,
				   const struct nft_set_elem *elem)
{
	const struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent = priv->root.rb_node;
	struct nft_rbtree_elem *rbe, *this = elem->priv;
	u8 genmask = nft_genmask_next(net);
	int d;

	while (parent != NULL) {
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
			   set->klen);
		if (d < 0)
			parent = parent->rb_left;
		else if (d > 0)
			parent = parent->rb_right;
		else {
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(this)) {
				parent = parent->rb_left;
				continue;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(this)) {
				parent = parent->rb_right;
				continue;
			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = parent->rb_left;
				continue;
			}
			nft_rbtree_flush(net, set, rbe);
			return rbe;
		}
	}
	return NULL;
}

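/* Walk all elements in tree order under the read lock, honouring
 * iter->skip and stopping early if the callback reports an error.
 */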
static void nft_rbtree_walk(const struct nft_ctx *ctx,
			    struct nft_set *set,
			    struct nft_set_iter *iter)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct nft_set_elem elem;
	struct rb_node *node;

	read_lock_bh(&priv->lock);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (iter->count < iter->skip)
			goto cont;
		if (!nft_set_elem_active(&rbe->ext, iter->genmask))
			goto cont;

		elem.priv = rbe;

		iter->err = iter->fn(ctx, set, iter, &elem);
		if (iter->err < 0) {
			read_unlock_bh(&priv->lock);
			return;
		}
cont:
		iter->count++;
	}
	read_unlock_bh(&priv->lock);
}

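/* Asynchronous gc worker: mark expired start elements and their paired end
 * elements dead and queue them on an async gc transaction. If the ruleset
 * changed underneath (gc_seq mismatch), bail out and try again on the next
 * scheduled run.
 */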
static void nft_rbtree_gc(struct work_struct *work)
{
	struct nft_rbtree_elem *rbe, *rbe_end = NULL;
	struct nftables_pernet *nft_net;
	struct nft_rbtree *priv;
	struct nft_trans_gc *gc;
	struct rb_node *node;
	struct nft_set *set;
	unsigned int gc_seq;
	struct net *net;

	priv = container_of(work, struct nft_rbtree, gc_work.work);
	set  = nft_set_container_of(priv);
	net  = read_pnet(&set->net);
	nft_net = nft_pernet(net);
	gc_seq  = READ_ONCE(nft_net->gc_seq);

	if (nft_set_gc_is_pending(set))
		goto done;

	gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL);
	if (!gc)
		goto done;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		/* Ruleset has been updated, try later. */
		if (READ_ONCE(nft_net->gc_seq) != gc_seq) {
			nft_trans_gc_destroy(gc);
			gc = NULL;
			goto try_later;
		}

		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (nft_set_elem_is_dead(&rbe->ext))
			goto dead_elem;

		/* Elements are stored in the rbtree in reversed order, from
		 * highest to lowest value, for historical reasons; this is
		 * why an end element is always visited before its start
		 * element.
		 */
		if (nft_rbtree_interval_end(rbe)) {
			rbe_end = rbe;
			continue;
		}
		if (!nft_set_elem_expired(&rbe->ext))
			continue;

		nft_set_elem_dead(&rbe->ext);

		if (!rbe_end)
			continue;

		nft_set_elem_dead(&rbe_end->ext);

		gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
		if (!gc)
			goto try_later;

		nft_trans_gc_elem_add(gc, rbe_end);
		rbe_end = NULL;
dead_elem:
		gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
		if (!gc)
			goto try_later;

		nft_trans_gc_elem_add(gc, rbe);
	}

	gc = nft_trans_gc_catchall(gc, gc_seq);

try_later:
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	if (gc)
		nft_trans_gc_queue_async_done(gc);
done:
	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
			   nft_set_gc_interval(set));
}

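/* Only the struct nft_rbtree head is allocated per set; elements are
 * allocated individually on insertion.
 */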
static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
			       const struct nft_set_desc *desc)
{
	return sizeof(struct nft_rbtree);
}

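/* Initialize the lock, seqcount and empty root; gc is only scheduled for
 * sets with timeouts.
 */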
static int nft_rbtree_init(const struct nft_set *set,
			   const struct nft_set_desc *desc,
			   const struct nlattr * const nla[])
{
	struct nft_rbtree *priv = nft_set_priv(set);

	rwlock_init(&priv->lock);
	seqcount_rwlock_init(&priv->count, &priv->lock);
	priv->root = RB_ROOT;

	INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
	if (set->flags & NFT_SET_TIMEOUT)
		queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
				   nft_set_gc_interval(set));

	return 0;
}

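/* Set teardown: cancel pending gc, wait for outstanding RCU callbacks,
 * then destroy all remaining elements.
 */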
static void nft_rbtree_destroy(const struct nft_ctx *ctx,
			       const struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct rb_node *node;

	cancel_delayed_work_sync(&priv->gc_work);
	rcu_barrier();
	while ((node = priv->root.rb_node) != NULL) {
		rb_erase(node, &priv->root);
		rbe = rb_entry(node, struct nft_rbtree_elem, node);
		nf_tables_set_elem_destroy(ctx, set, rbe);
	}
}

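/* The rbtree backend handles single-field keys only (no concatenations);
 * lookups cost O(log n), storage O(n).
 */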
static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
				struct nft_set_estimate *est)
{
	if (desc->field_count > 1)
		return false;

	if (desc->size)
		est->size = sizeof(struct nft_rbtree) +
			    desc->size * sizeof(struct nft_rbtree_elem);
	else
		est->size = ~0;

	est->lookup = NFT_SET_CLASS_O_LOG_N;
	est->space  = NFT_SET_CLASS_O_N;

	return true;
}

const struct nft_set_type nft_set_rbtree_type = {
	.features	= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
	.ops		= {
		.privsize	= nft_rbtree_privsize,
		.elemsize	= offsetof(struct nft_rbtree_elem, ext),
		.estimate	= nft_rbtree_estimate,
		.init		= nft_rbtree_init,
		.destroy	= nft_rbtree_destroy,
		.insert		= nft_rbtree_insert,
		.remove		= nft_rbtree_remove,
		.deactivate	= nft_rbtree_deactivate,
		.flush		= nft_rbtree_flush,
		.activate	= nft_rbtree_activate,
		.lookup		= nft_rbtree_lookup,
		.walk		= nft_rbtree_walk,
		.get		= nft_rbtree_get,
	},
};