xref: /openbmc/linux/net/netfilter/nft_set_rbtree.c (revision 36db6e8484ed455bbb320d89a119378897ae991c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
4  *
5  * Development of this code funded by Astaro AG (http://www.astaro.com/)
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/list.h>
12 #include <linux/rbtree.h>
13 #include <linux/netlink.h>
14 #include <linux/netfilter.h>
15 #include <linux/netfilter/nf_tables.h>
16 #include <net/netfilter/nf_tables_core.h>
17 
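/* Per-set private data. Writers serialize on @lock and bump @count, which
 * lets lookups run locklessly and fall back to taking @lock for reading
 * only when a concurrent update is detected (see nft_rbtree_lookup()).
 * @last_gc records, in jiffies, when synchronous gc last ran.
 */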
18 struct nft_rbtree {
19 	struct rb_root		root;
20 	rwlock_t		lock;
21 	seqcount_rwlock_t	count;
22 	unsigned long		last_gc;
23 };
24 
25 struct nft_rbtree_elem {
26 	struct rb_node		node;
27 	struct nft_set_ext	ext;
28 };
29 
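/* An interval is stored as a pair of tree elements: a start element and an
 * end element that carries NFT_SET_ELEM_INTERVAL_END in its flags extension.
 * Elements without a flags extension count as interval starts.
 */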
30 static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
31 {
32 	return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
33 	       (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
34 }
35 
36 static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
37 {
38 	return !nft_rbtree_interval_end(rbe);
39 }
40 
41 static int nft_rbtree_cmp(const struct nft_set *set,
42 			  const struct nft_rbtree_elem *e1,
43 			  const struct nft_rbtree_elem *e2)
44 {
45 	return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
46 		      set->klen);
47 }
48 
49 static bool nft_rbtree_elem_expired(const struct nft_rbtree_elem *rbe)
50 {
51 	return nft_set_elem_expired(&rbe->ext);
52 }
53 
54 static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
55 				const u32 *key, const struct nft_set_ext **ext,
56 				unsigned int seq)
57 {
58 	struct nft_rbtree *priv = nft_set_priv(set);
59 	const struct nft_rbtree_elem *rbe, *interval = NULL;
60 	u8 genmask = nft_genmask_cur(net);
61 	const struct rb_node *parent;
62 	int d;
63 
64 	parent = rcu_dereference_raw(priv->root.rb_node);
65 	while (parent != NULL) {
66 		if (read_seqcount_retry(&priv->count, seq))
67 			return false;
68 
69 		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
70 
71 		d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
72 		if (d < 0) {
73 			parent = rcu_dereference_raw(parent->rb_left);
74 			if (interval &&
75 			    !nft_rbtree_cmp(set, rbe, interval) &&
76 			    nft_rbtree_interval_end(rbe) &&
77 			    nft_rbtree_interval_start(interval))
78 				continue;
79 			interval = rbe;
80 		} else if (d > 0)
81 			parent = rcu_dereference_raw(parent->rb_right);
82 		else {
83 			if (!nft_set_elem_active(&rbe->ext, genmask)) {
84 				parent = rcu_dereference_raw(parent->rb_left);
85 				continue;
86 			}
87 
88 			if (nft_rbtree_elem_expired(rbe))
89 				return false;
90 
91 			if (nft_rbtree_interval_end(rbe)) {
92 				if (nft_set_is_anonymous(set))
93 					return false;
94 				parent = rcu_dereference_raw(parent->rb_left);
95 				interval = NULL;
96 				continue;
97 			}
98 
99 			*ext = &rbe->ext;
100 			return true;
101 		}
102 	}
103 
104 	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
105 	    nft_set_elem_active(&interval->ext, genmask) &&
106 	    !nft_rbtree_elem_expired(interval) &&
107 	    nft_rbtree_interval_start(interval)) {
108 		*ext = &interval->ext;
109 		return true;
110 	}
111 
112 	return false;
113 }
114 
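/* Fast path: try a lockless walk under the seqcount; if an update raced
 * with us, retry once more with the read lock held for a stable view.
 */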
115 INDIRECT_CALLABLE_SCOPE
116 bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
117 		       const u32 *key, const struct nft_set_ext **ext)
118 {
119 	struct nft_rbtree *priv = nft_set_priv(set);
120 	unsigned int seq = read_seqcount_begin(&priv->count);
121 	bool ret;
122 
123 	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
124 	if (ret || !read_seqcount_retry(&priv->count, seq))
125 		return ret;
126 
127 	read_lock_bh(&priv->lock);
128 	seq = read_seqcount_begin(&priv->count);
129 	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
130 	read_unlock_bh(&priv->lock);
131 
132 	return ret;
133 }
134 
135 static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
136 			     const u32 *key, struct nft_rbtree_elem **elem,
137 			     unsigned int seq, unsigned int flags, u8 genmask)
138 {
139 	struct nft_rbtree_elem *rbe, *interval = NULL;
140 	struct nft_rbtree *priv = nft_set_priv(set);
141 	const struct rb_node *parent;
142 	const void *this;
143 	int d;
144 
145 	parent = rcu_dereference_raw(priv->root.rb_node);
146 	while (parent != NULL) {
147 		if (read_seqcount_retry(&priv->count, seq))
148 			return false;
149 
150 		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
151 
152 		this = nft_set_ext_key(&rbe->ext);
153 		d = memcmp(this, key, set->klen);
154 		if (d < 0) {
155 			parent = rcu_dereference_raw(parent->rb_left);
156 			if (!(flags & NFT_SET_ELEM_INTERVAL_END))
157 				interval = rbe;
158 		} else if (d > 0) {
159 			parent = rcu_dereference_raw(parent->rb_right);
160 			if (flags & NFT_SET_ELEM_INTERVAL_END)
161 				interval = rbe;
162 		} else {
163 			if (!nft_set_elem_active(&rbe->ext, genmask)) {
164 				parent = rcu_dereference_raw(parent->rb_left);
165 				continue;
166 			}
167 
168 			if (nft_set_elem_expired(&rbe->ext))
169 				return false;
170 
171 			if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
172 			    (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
173 			    (flags & NFT_SET_ELEM_INTERVAL_END)) {
174 				*elem = rbe;
175 				return true;
176 			}
177 
178 			if (nft_rbtree_interval_end(rbe))
179 				interval = NULL;
180 
181 			parent = rcu_dereference_raw(parent->rb_left);
182 		}
183 	}
184 
185 	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
186 	    nft_set_elem_active(&interval->ext, genmask) &&
187 	    !nft_set_elem_expired(&interval->ext) &&
188 	    ((!nft_rbtree_interval_end(interval) &&
189 	      !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
190 	     (nft_rbtree_interval_end(interval) &&
191 	      (flags & NFT_SET_ELEM_INTERVAL_END)))) {
192 		*elem = interval;
193 		return true;
194 	}
195 
196 	return false;
197 }
198 
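/* GET path: same lockless-then-locked pattern as nft_rbtree_lookup(), but it
 * honours NFT_SET_ELEM_INTERVAL_END in @flags and returns the matching
 * element itself or ERR_PTR(-ENOENT).
 */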
199 static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
200 			    const struct nft_set_elem *elem, unsigned int flags)
201 {
202 	struct nft_rbtree *priv = nft_set_priv(set);
203 	unsigned int seq = read_seqcount_begin(&priv->count);
204 	struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
205 	const u32 *key = (const u32 *)&elem->key.val;
206 	u8 genmask = nft_genmask_cur(net);
207 	bool ret;
208 
209 	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
210 	if (ret || !read_seqcount_retry(&priv->count, seq))
211 		return rbe;
212 
213 	read_lock_bh(&priv->lock);
214 	seq = read_seqcount_begin(&priv->count);
215 	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
216 	if (!ret)
217 		rbe = ERR_PTR(-ENOENT);
218 	read_unlock_bh(&priv->lock);
219 
220 	return rbe;
221 }
222 
223 static void nft_rbtree_gc_elem_remove(struct net *net, struct nft_set *set,
224 				      struct nft_rbtree *priv,
225 				      struct nft_rbtree_elem *rbe)
226 {
227 	struct nft_set_elem elem = {
228 		.priv	= rbe,
229 	};
230 
231 	lockdep_assert_held_write(&priv->lock);
232 	nft_setelem_data_deactivate(net, set, &elem);
233 	rb_erase(&rbe->node, &priv->root);
234 }
235 
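/* Synchronous gc of one expired interval: remove both the expired start
 * element and the active end element preceding it, since end elements carry
 * no timeout extension of their own. Returns the removed end element (or
 * NULL) so the caller can tell when a node it already visited has gone away.
 */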
236 static const struct nft_rbtree_elem *
237 nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
238 		   struct nft_rbtree_elem *rbe)
239 {
240 	struct nft_set *set = (struct nft_set *)__set;
241 	struct rb_node *prev = rb_prev(&rbe->node);
242 	struct net *net = read_pnet(&set->net);
243 	struct nft_rbtree_elem *rbe_prev;
244 	struct nft_trans_gc *gc;
245 
246 	gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC);
247 	if (!gc)
248 		return ERR_PTR(-ENOMEM);
249 
250 	/* search for end interval coming before this element.
251 	 * end intervals don't carry a timeout extension, they
252 	 * are coupled with the interval start element.
253 	 */
254 	while (prev) {
255 		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
256 		if (nft_rbtree_interval_end(rbe_prev) &&
257 		    nft_set_elem_active(&rbe_prev->ext, NFT_GENMASK_ANY))
258 			break;
259 
260 		prev = rb_prev(prev);
261 	}
262 
263 	rbe_prev = NULL;
264 	if (prev) {
265 		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
266 		nft_rbtree_gc_elem_remove(net, set, priv, rbe_prev);
267 
268 		/* There is always room in this trans gc for this element, so
269 		 * memory allocation never actually happens, hence the warning
270 		 * splat if it ever did. No need to set NFT_SET_ELEM_DEAD_BIT,
271 		 * this is synchronous gc which never fails.
272 		 */
273 		gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
274 		if (WARN_ON_ONCE(!gc))
275 			return ERR_PTR(-ENOMEM);
276 
277 		nft_trans_gc_elem_add(gc, rbe_prev);
278 	}
279 
280 	nft_rbtree_gc_elem_remove(net, set, priv, rbe);
281 	gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
282 	if (WARN_ON_ONCE(!gc))
283 		return ERR_PTR(-ENOMEM);
284 
285 	nft_trans_gc_elem_add(gc, rbe);
286 
287 	nft_trans_gc_queue_sync_done(gc);
288 
289 	return rbe_prev;
290 }
291 
292 static bool nft_rbtree_update_first(const struct nft_set *set,
293 				    struct nft_rbtree_elem *rbe,
294 				    struct rb_node *first)
295 {
296 	struct nft_rbtree_elem *first_elem;
297 
298 	first_elem = rb_entry(first, struct nft_rbtree_elem, node);
299 	/* this element is closest to where the new element is to be inserted:
300 	 * update the first element for the node list path.
301 	 */
302 	if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
303 		return true;
304 
305 	return false;
306 }
307 
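/* Insert @new after walking the ordered elements to rule out overlaps with
 * existing, active entries. Returns -EEXIST on a full overlap, -ENOTEMPTY on
 * a partial overlap, and -EAGAIN when expired-element gc removed a node the
 * walk had already recorded, in which case the caller simply retries.
 */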
308 static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
309 			       struct nft_rbtree_elem *new,
310 			       struct nft_set_ext **ext)
311 {
312 	struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
313 	struct rb_node *node, *next, *parent, **p, *first = NULL;
314 	struct nft_rbtree *priv = nft_set_priv(set);
315 	u8 cur_genmask = nft_genmask_cur(net);
316 	u8 genmask = nft_genmask_next(net);
317 	u64 tstamp = nft_net_tstamp(net);
318 	int d;
319 
320 	/* Descend the tree to search for an existing element greater than the
321 	 * new element's key value. This is the first element from which to walk
322 	 * the ordered elements looking for a possible overlap.
323 	 */
324 	parent = NULL;
325 	p = &priv->root.rb_node;
326 	while (*p != NULL) {
327 		parent = *p;
328 		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
329 		d = nft_rbtree_cmp(set, rbe, new);
330 
331 		if (d < 0) {
332 			p = &parent->rb_left;
333 		} else if (d > 0) {
334 			if (!first ||
335 			    nft_rbtree_update_first(set, rbe, first))
336 				first = &rbe->node;
337 
338 			p = &parent->rb_right;
339 		} else {
340 			if (nft_rbtree_interval_end(rbe))
341 				p = &parent->rb_left;
342 			else
343 				p = &parent->rb_right;
344 		}
345 	}
346 
347 	if (!first)
348 		first = rb_first(&priv->root);
349 
350 	/* Detect overlap by going through the list of valid tree nodes.
351 	 * Values stored in the tree are in reversed order, starting from
352 	 * highest to lowest value.
353 	 */
354 	for (node = first; node != NULL; node = next) {
355 		next = rb_next(node);
356 
357 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
358 
359 		if (!nft_set_elem_active(&rbe->ext, genmask))
360 			continue;
361 
362 		/* perform garbage collection to avoid bogus overlap reports
363 		 * but skip new elements in this transaction.
364 		 */
365 		if (__nft_set_elem_expired(&rbe->ext, tstamp) &&
366 		    nft_set_elem_active(&rbe->ext, cur_genmask)) {
367 			const struct nft_rbtree_elem *removed_end;
368 
369 			removed_end = nft_rbtree_gc_elem(set, priv, rbe);
370 			if (IS_ERR(removed_end))
371 				return PTR_ERR(removed_end);
372 
373 			if (removed_end == rbe_le || removed_end == rbe_ge)
374 				return -EAGAIN;
375 
376 			continue;
377 		}
378 
379 		d = nft_rbtree_cmp(set, rbe, new);
380 		if (d == 0) {
381 			/* Matching end element: no need to look for an
382 			 * overlapping greater or equal element.
383 			 */
384 			if (nft_rbtree_interval_end(rbe)) {
385 				rbe_le = rbe;
386 				break;
387 			}
388 
389 			/* first element that is greater or equal to key value. */
390 			if (!rbe_ge) {
391 				rbe_ge = rbe;
392 				continue;
393 			}
394 
395 			/* this is a closer greater-or-equal element, update it. */
396 			if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
397 				rbe_ge = rbe;
398 				continue;
399 			}
400 
401 			/* element is equal to the key value, make sure flags are
402 			 * the same: an existing greater-or-equal start element
403 			 * must not be replaced by a greater-or-equal end element.
404 			 */
405 			if ((nft_rbtree_interval_start(new) &&
406 			     nft_rbtree_interval_start(rbe_ge)) ||
407 			    (nft_rbtree_interval_end(new) &&
408 			     nft_rbtree_interval_end(rbe_ge))) {
409 				rbe_ge = rbe;
410 				continue;
411 			}
412 		} else if (d > 0) {
413 			/* annotate element greater than the new element. */
414 			rbe_ge = rbe;
415 			continue;
416 		} else if (d < 0) {
417 			/* annotate element less than the new element. */
418 			rbe_le = rbe;
419 			break;
420 		}
421 	}
422 
423 	/* - new start element matching existing start element: full overlap
424 	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
425 	 */
426 	if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
427 	    nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
428 		*ext = &rbe_ge->ext;
429 		return -EEXIST;
430 	}
431 
432 	/* - new end element matching existing end element: full overlap
433 	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
434 	 */
435 	if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
436 	    nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
437 		*ext = &rbe_le->ext;
438 		return -EEXIST;
439 	}
440 
441 	/* - new start element with existing closest, less or equal key value
442 	 *   being a start element: partial overlap, reported as -ENOTEMPTY.
443 	 *   Anonymous sets allow for two consecutive start elements since they
444 	 *   are constant, skip them to avoid bogus overlap reports.
445 	 */
446 	if (!nft_set_is_anonymous(set) && rbe_le &&
447 	    nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
448 		return -ENOTEMPTY;
449 
450 	/* - new end element with existing closest, less or equal key value
451 	 *   being an end element: partial overlap, reported as -ENOTEMPTY.
452 	 */
453 	if (rbe_le &&
454 	    nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
455 		return -ENOTEMPTY;
456 
457 	/* - new end element with existing closest, greater or equal key value
458 	 *   being an end element: partial overlap, reported as -ENOTEMPTY.
459 	 */
460 	if (rbe_ge &&
461 	    nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
462 		return -ENOTEMPTY;
463 
464 	/* Accepted element: pick insertion point depending on key value */
465 	parent = NULL;
466 	p = &priv->root.rb_node;
467 	while (*p != NULL) {
468 		parent = *p;
469 		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
470 		d = nft_rbtree_cmp(set, rbe, new);
471 
472 		if (d < 0)
473 			p = &parent->rb_left;
474 		else if (d > 0)
475 			p = &parent->rb_right;
476 		else if (nft_rbtree_interval_end(rbe))
477 			p = &parent->rb_left;
478 		else
479 			p = &parent->rb_right;
480 	}
481 
482 	rb_link_node_rcu(&new->node, parent, p);
483 	rb_insert_color(&new->node, &priv->root);
484 	return 0;
485 }
486 
487 static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
488 			     const struct nft_set_elem *elem,
489 			     struct nft_set_ext **ext)
490 {
491 	struct nft_rbtree *priv = nft_set_priv(set);
492 	struct nft_rbtree_elem *rbe = elem->priv;
493 	int err;
494 
495 	do {
496 		if (fatal_signal_pending(current))
497 			return -EINTR;
498 
499 		cond_resched();
500 
501 		write_lock_bh(&priv->lock);
502 		write_seqcount_begin(&priv->count);
503 		err = __nft_rbtree_insert(net, set, rbe, ext);
504 		write_seqcount_end(&priv->count);
505 		write_unlock_bh(&priv->lock);
506 	} while (err == -EAGAIN);
507 
508 	return err;
509 }
510 
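/* Erase under the write lock, bumping the sequence counter so that lockless
 * readers retry instead of walking a tree that changes underneath them.
 */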
511 static void nft_rbtree_erase(struct nft_rbtree *priv, struct nft_rbtree_elem *rbe)
512 {
513 	write_lock_bh(&priv->lock);
514 	write_seqcount_begin(&priv->count);
515 	rb_erase(&rbe->node, &priv->root);
516 	write_seqcount_end(&priv->count);
517 	write_unlock_bh(&priv->lock);
518 }
519 
520 static void nft_rbtree_remove(const struct net *net,
521 			      const struct nft_set *set,
522 			      const struct nft_set_elem *elem)
523 {
524 	struct nft_rbtree *priv = nft_set_priv(set);
525 	struct nft_rbtree_elem *rbe = elem->priv;
526 
527 	nft_rbtree_erase(priv, rbe);
528 }
529 
530 static void nft_rbtree_activate(const struct net *net,
531 				const struct nft_set *set,
532 				const struct nft_set_elem *elem)
533 {
534 	struct nft_rbtree_elem *rbe = elem->priv;
535 
536 	nft_clear(net, &rbe->ext);
537 }
538 
539 static bool nft_rbtree_flush(const struct net *net,
540 			     const struct nft_set *set, void *priv)
541 {
542 	struct nft_rbtree_elem *rbe = priv;
543 
544 	nft_set_elem_change_active(net, set, &rbe->ext);
545 
546 	return true;
547 }
548 
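/* Find the tree element corresponding to @elem and deactivate it for the
 * next generation, skipping the paired element that shares the same key but
 * carries the opposite interval (start/end) flag.
 */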
549 static void *nft_rbtree_deactivate(const struct net *net,
550 				   const struct nft_set *set,
551 				   const struct nft_set_elem *elem)
552 {
553 	const struct nft_rbtree *priv = nft_set_priv(set);
554 	const struct rb_node *parent = priv->root.rb_node;
555 	struct nft_rbtree_elem *rbe, *this = elem->priv;
556 	u8 genmask = nft_genmask_next(net);
557 	u64 tstamp = nft_net_tstamp(net);
558 	int d;
559 
560 	while (parent != NULL) {
561 		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
562 
563 		d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
564 					   set->klen);
565 		if (d < 0)
566 			parent = parent->rb_left;
567 		else if (d > 0)
568 			parent = parent->rb_right;
569 		else {
570 			if (nft_rbtree_interval_end(rbe) &&
571 			    nft_rbtree_interval_start(this)) {
572 				parent = parent->rb_left;
573 				continue;
574 			} else if (nft_rbtree_interval_start(rbe) &&
575 				   nft_rbtree_interval_end(this)) {
576 				parent = parent->rb_right;
577 				continue;
578 			} else if (__nft_set_elem_expired(&rbe->ext, tstamp)) {
579 				break;
580 			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
581 				parent = parent->rb_left;
582 				continue;
583 			}
584 			nft_rbtree_flush(net, set, rbe);
585 			return rbe;
586 		}
587 	}
588 	return NULL;
589 }
590 
591 static void nft_rbtree_walk(const struct nft_ctx *ctx,
592 			    struct nft_set *set,
593 			    struct nft_set_iter *iter)
594 {
595 	struct nft_rbtree *priv = nft_set_priv(set);
596 	struct nft_rbtree_elem *rbe;
597 	struct nft_set_elem elem;
598 	struct rb_node *node;
599 
600 	read_lock_bh(&priv->lock);
601 	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
602 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
603 
604 		if (iter->count < iter->skip)
605 			goto cont;
606 
607 		elem.priv = rbe;
608 
609 		iter->err = iter->fn(ctx, set, iter, &elem);
610 		if (iter->err < 0) {
611 			read_unlock_bh(&priv->lock);
612 			return;
613 		}
614 cont:
615 		iter->count++;
616 	}
617 	read_unlock_bh(&priv->lock);
618 }
619 
620 static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
621 				 struct nft_rbtree *priv,
622 				 struct nft_rbtree_elem *rbe)
623 {
624 	struct nft_set_elem elem = {
625 		.priv	= rbe,
626 	};
627 
628 	nft_setelem_data_deactivate(net, set, &elem);
629 	nft_rbtree_erase(priv, rbe);
630 }
631 
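/* Periodic synchronous gc, driven from the commit path: walk the whole tree
 * and release expired intervals, queueing each end element ahead of its
 * expired start element since the end element carries no timeout of its own.
 */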
632 static void nft_rbtree_gc(struct nft_set *set)
633 {
634 	struct nft_rbtree *priv = nft_set_priv(set);
635 	struct nft_rbtree_elem *rbe, *rbe_end = NULL;
636 	struct nftables_pernet *nft_net;
637 	struct net *net = read_pnet(&set->net);
638 	u64 tstamp = nft_net_tstamp(net);
639 	struct rb_node *node, *next;
640 	struct nft_trans_gc *gc;
641 
642 	set  = nft_set_container_of(priv);
643 	net  = read_pnet(&set->net);
644 	nft_net = nft_pernet(net);
645 
646 	gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
647 	if (!gc)
648 		return;
649 
650 	for (node = rb_first(&priv->root); node ; node = next) {
651 		next = rb_next(node);
652 
653 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
654 
655 		/* elements are reversed in the rbtree for historical reasons,
656 		 * from highest to lowest value, that is why end element is
657 		 * always visited before the start element.
658 		 */
659 		if (nft_rbtree_interval_end(rbe)) {
660 			rbe_end = rbe;
661 			continue;
662 		}
663 		if (!__nft_set_elem_expired(&rbe->ext, tstamp))
664 			continue;
665 
666 		gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
667 		if (!gc)
668 			goto try_later;
669 
670 		/* end element needs to be removed first, it has
671 		 * no timeout extension.
672 		 */
673 		if (rbe_end) {
674 			nft_rbtree_gc_remove(net, set, priv, rbe_end);
675 			nft_trans_gc_elem_add(gc, rbe_end);
676 			rbe_end = NULL;
677 		}
678 
679 		gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
680 		if (!gc)
681 			goto try_later;
682 
683 		nft_rbtree_gc_remove(net, set, priv, rbe);
684 		nft_trans_gc_elem_add(gc, rbe);
685 	}
686 
687 try_later:
688 
689 	if (gc) {
690 		gc = nft_trans_gc_catchall_sync(gc);
691 		nft_trans_gc_queue_sync_done(gc);
692 		priv->last_gc = jiffies;
693 	}
694 }
695 
696 static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
697 			       const struct nft_set_desc *desc)
698 {
699 	return sizeof(struct nft_rbtree);
700 }
701 
702 static int nft_rbtree_init(const struct nft_set *set,
703 			   const struct nft_set_desc *desc,
704 			   const struct nlattr * const nla[])
705 {
706 	struct nft_rbtree *priv = nft_set_priv(set);
707 
708 	rwlock_init(&priv->lock);
709 	seqcount_rwlock_init(&priv->count, &priv->lock);
710 	priv->root = RB_ROOT;
711 
712 	return 0;
713 }
714 
715 static void nft_rbtree_destroy(const struct nft_ctx *ctx,
716 			       const struct nft_set *set)
717 {
718 	struct nft_rbtree *priv = nft_set_priv(set);
719 	struct nft_rbtree_elem *rbe;
720 	struct rb_node *node;
721 
722 	while ((node = priv->root.rb_node) != NULL) {
723 		rb_erase(node, &priv->root);
724 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
725 		nf_tables_set_elem_destroy(ctx, set, rbe);
726 	}
727 }
728 
729 static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
730 				struct nft_set_estimate *est)
731 {
732 	if (desc->field_count > 1)
733 		return false;
734 
735 	if (desc->size)
736 		est->size = sizeof(struct nft_rbtree) +
737 			    desc->size * sizeof(struct nft_rbtree_elem);
738 	else
739 		est->size = ~0;
740 
741 	est->lookup = NFT_SET_CLASS_O_LOG_N;
742 	est->space  = NFT_SET_CLASS_O_N;
743 
744 	return true;
745 }
746 
747 static void nft_rbtree_commit(struct nft_set *set)
748 {
749 	struct nft_rbtree *priv = nft_set_priv(set);
750 
751 	if (time_after_eq(jiffies, priv->last_gc + nft_set_gc_interval(set)))
752 		nft_rbtree_gc(set);
753 }
754 
755 static void nft_rbtree_gc_init(const struct nft_set *set)
756 {
757 	struct nft_rbtree *priv = nft_set_priv(set);
758 
759 	priv->last_gc = jiffies;
760 }
761 
762 /* rbtree stores ranges as singleton elements, each range is composed of two
763  * elements ...
764  */
765 static u32 nft_rbtree_ksize(u32 size)
766 {
767 	return size * 2;
768 }
769 
770 /* ... hide this detail from userspace. */
771 static u32 nft_rbtree_usize(u32 size)
772 {
773 	if (!size)
774 		return 0;
775 
776 	return size / 2;
777 }
778 
779 static u32 nft_rbtree_adjust_maxsize(const struct nft_set *set)
780 {
781 	struct nft_rbtree *priv = nft_set_priv(set);
782 	struct nft_rbtree_elem *rbe;
783 	struct rb_node *node;
784 	const void *key;
785 
786 	node = rb_last(&priv->root);
787 	if (!node)
788 		return 0;
789 
790 	rbe = rb_entry(node, struct nft_rbtree_elem, node);
791 	if (!nft_rbtree_interval_end(rbe))
792 		return 0;
793 
794 	key = nft_set_ext_key(&rbe->ext);
795 	if (memchr(key, 1, set->klen))
796 		return 0;
797 
798 	/* this is the all-zero no-match element. */
799 	return 1;
800 }
801 
802 const struct nft_set_type nft_set_rbtree_type = {
803 	.features	= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
804 	.ops		= {
805 		.privsize	= nft_rbtree_privsize,
806 		.elemsize	= offsetof(struct nft_rbtree_elem, ext),
807 		.estimate	= nft_rbtree_estimate,
808 		.init		= nft_rbtree_init,
809 		.destroy	= nft_rbtree_destroy,
810 		.insert		= nft_rbtree_insert,
811 		.remove		= nft_rbtree_remove,
812 		.deactivate	= nft_rbtree_deactivate,
813 		.flush		= nft_rbtree_flush,
814 		.activate	= nft_rbtree_activate,
815 		.commit		= nft_rbtree_commit,
816 		.gc_init	= nft_rbtree_gc_init,
817 		.lookup		= nft_rbtree_lookup,
818 		.walk		= nft_rbtree_walk,
819 		.get		= nft_rbtree_get,
820 		.ksize		= nft_rbtree_ksize,
821 		.usize		= nft_rbtree_usize,
822 		.adjust_maxsize = nft_rbtree_adjust_maxsize,
823 	},
824 };
825