1 // SPDX-License-Identifier: GPL-2.0-only
2
3 /* PIPAPO: PIle PAcket POlicies: set for arbitrary concatenations of ranges
4 *
5 * Copyright (c) 2019-2020 Red Hat GmbH
6 *
7 * Author: Stefano Brivio <sbrivio@redhat.com>
8 */
9
10 /**
11 * DOC: Theory of Operation
12 *
13 *
14 * Problem
15 * -------
16 *
17 * Match packet bytes against entries composed of ranged or non-ranged packet
18 * field specifiers, mapping them to arbitrary references. For example:
19 *
20 * ::
21 *
22 * --- fields --->
23 * | [net],[port],[net]... => [reference]
24 * entries [net],[port],[net]... => [reference]
25 * | [net],[port],[net]... => [reference]
26 * V ...
27 *
28 * where [net] fields can be IP ranges or netmasks, and [port] fields are port
29 * ranges. Arbitrary packet fields can be matched.
30 *
31 *
32 * Algorithm Overview
33 * ------------------
34 *
35 * This algorithm is loosely inspired by [Ligatti 2010], and fundamentally
36 * relies on the consideration that every contiguous range in a space of b bits
37 * can be converted into b * 2 netmasks, from Theorem 3 in [Rottenstreich 2010],
38 * as also illustrated in Section 9 of [Kogan 2014].
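 *
 * For example, over a 3-bit space, the range 1-6 expands to the four netmasks
 * 001/3, 010/2, 100/2 and 110/3, within the b * 2 = 6 bound.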
39 *
 * Classification against a number of entries, each requiring a match on given
 * bits of a packet field, is performed by grouping those bits in sets of
 * arbitrary size, and classifying packet bits one group at a time.
43 *
44 * Example:
45 * to match the source port (16 bits) of a packet, we can divide those 16 bits
 * into 4 groups of 4 bits each. Given the entry:
47 * 0000 0001 0101 1001
48 * and a packet with source port:
49 * 0000 0001 1010 1001
50 * first and second groups match, but the third doesn't. We conclude that the
51 * packet doesn't match the given entry.
52 *
53 * Translate the set to a sequence of lookup tables, one per field. Each table
54 * has two dimensions: bit groups to be matched for a single packet field, and
55 * all the possible values of said groups (buckets). Input entries are
56 * represented as one or more rules, depending on the number of composing
57 * netmasks for the given field specifier, and a group match is indicated as a
58 * set bit, with number corresponding to the rule index, in all the buckets
59 * whose value matches the entry for a given group.
60 *
61 * Rules are mapped between fields through an array of x, n pairs, with each
62 * item mapping a matched rule to one or more rules. The position of the pair in
63 * the array indicates the matched rule to be mapped to the next field, x
64 * indicates the first rule index in the next field, and n the amount of
65 * next-field rules the current rule maps to.
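 *
 * For example, the pair (x = 3, n = 2) stored at position 5 means that rule 5
 * in this field maps to rules 3 and 4 in the next field.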
66 *
67 * The mapping array for the last field maps to the desired references.
68 *
69 * To match, we perform table lookups using the values of grouped packet bits,
70 * and use a sequence of bitwise operations to progressively evaluate rule
71 * matching.
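 *
 * As a rough, stand-alone sketch of a single-field lookup, assuming 4-bit
 * groups and that a single unsigned long is wide enough to hold all rule
 * bits (the kernel code below works on bitmaps of arbitrary size instead):
 *
 * ::
 *
 *	#include <stdint.h>
 *
 *	#define GROUPS	8	// e.g. one IPv4 address, 4-bit groups
 *	#define BUCKETS	16
 *
 *	// lt[g][b]: set bits are the rules matching value b in group g
 *	unsigned long lookup_field(unsigned long lt[GROUPS][BUCKETS],
 *				   const uint8_t *key)
 *	{
 *		unsigned long res = ~0UL;	// start from all-ones bitmap
 *		int g;
 *
 *		for (g = 0; g < GROUPS; g++) {
 *			uint8_t v = (g & 1) ? key[g / 2] & 0xf
 *					    : key[g / 2] >> 4;
 *
 *			res &= lt[g][v];	// AND bucket for this group
 *		}
 *
 *		return res;	// set bits correspond to matching rules
 *	}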
72 *
 * A stand-alone reference implementation, also including notes about possible
74 * future optimisations, is available at:
75 * https://pipapo.lameexcu.se/
76 *
77 * Insertion
78 * ---------
79 *
80 * - For each packet field:
81 *
82 * - divide the b packet bits we want to classify into groups of size t,
83 * obtaining ceil(b / t) groups
84 *
85 * Example: match on destination IP address, with t = 4: 32 bits, 8 groups
86 * of 4 bits each
87 *
88 * - allocate a lookup table with one column ("bucket") for each possible
89 * value of a group, and with one row for each group
90 *
91 * Example: 8 groups, 2^4 buckets:
92 *
93 * ::
94 *
95 * bucket
96 * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
97 * 0
98 * 1
99 * 2
100 * 3
101 * 4
102 * 5
103 * 6
104 * 7
105 *
106 * - map the bits we want to classify for the current field, for a given
107 * entry, to a single rule for non-ranged and netmask set items, and to one
108 * or multiple rules for ranges. Ranges are expanded to composing netmasks
109 * by pipapo_expand().
110 *
111 * Example: 2 entries, 10.0.0.5:1024 and 192.168.1.0-192.168.2.1:2048
112 * - rule #0: 10.0.0.5
113 * - rule #1: 192.168.1.0/24
114 * - rule #2: 192.168.2.0/31
115 *
116 * - insert references to the rules in the lookup table, selecting buckets
117 * according to bit values of a rule in the given group. This is done by
118 * pipapo_insert().
119 *
120 * Example: given:
121 * - rule #0: 10.0.0.5 mapping to buckets
122 * < 0 10 0 0 0 0 0 5 >
123 * - rule #1: 192.168.1.0/24 mapping to buckets
124 * < 12 0 10 8 0 1 < 0..15 > < 0..15 > >
125 * - rule #2: 192.168.2.0/31 mapping to buckets
126 * < 12 0 10 8 0 2 0 < 0..1 > >
127 *
128 * these bits are set in the lookup table:
129 *
130 * ::
131 *
132 * bucket
133 * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
134 * 0 0 1,2
135 * 1 1,2 0
136 * 2 0 1,2
137 * 3 0 1,2
138 * 4 0,1,2
139 * 5 0 1 2
140 * 6 0,1,2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
141 * 7 1,2 1,2 1 1 1 0,1 1 1 1 1 1 1 1 1 1 1
142 *
143 * - if this is not the last field in the set, fill a mapping array that maps
144 * rules from the lookup table to rules belonging to the same entry in
145 * the next lookup table, done by pipapo_map().
146 *
147 * Note that as rules map to contiguous ranges of rules, given how netmask
148 * expansion and insertion is performed, &union nft_pipapo_map_bucket stores
149 * this information as pairs of first rule index, rule count.
150 *
151 * Example: 2 entries, 10.0.0.5:1024 and 192.168.1.0-192.168.2.1:2048,
152 * given lookup table #0 for field 0 (see example above):
153 *
154 * ::
155 *
156 * bucket
157 * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
158 * 0 0 1,2
159 * 1 1,2 0
160 * 2 0 1,2
161 * 3 0 1,2
162 * 4 0,1,2
163 * 5 0 1 2
164 * 6 0,1,2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
165 * 7 1,2 1,2 1 1 1 0,1 1 1 1 1 1 1 1 1 1 1
166 *
167 * and lookup table #1 for field 1 with:
168 * - rule #0: 1024 mapping to buckets
169 * < 0 0 4 0 >
170 * - rule #1: 2048 mapping to buckets
171 * < 0 0 5 0 >
172 *
173 * ::
174 *
175 * bucket
176 * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
177 * 0 0,1
178 * 1 0,1
179 * 2 0 1
180 * 3 0,1
181 *
182 * we need to map rules for 10.0.0.5 in lookup table #0 (rule #0) to 1024
183 * in lookup table #1 (rule #0) and rules for 192.168.1.0-192.168.2.1
 * (rules #1, #2) to 2048 in lookup table #1 (rule #1):
185 *
186 * ::
187 *
188 * rule indices in current field: 0 1 2
189 * map to rules in next field: 0 1 1
190 *
191 * - if this is the last field in the set, fill a mapping array that maps
192 * rules from the last lookup table to element pointers, also done by
193 * pipapo_map().
194 *
195 * Note that, in this implementation, we have two elements (start, end) for
196 * each entry. The pointer to the end element is stored in this array, and
197 * the pointer to the start element is linked from it.
198 *
199 * Example: entry 10.0.0.5:1024 has a corresponding &struct nft_pipapo_elem
200 * pointer, 0x66, and element for 192.168.1.0-192.168.2.1:2048 is at 0x42.
201 * From the rules of lookup table #1 as mapped above:
202 *
203 * ::
204 *
205 * rule indices in last field: 0 1
206 * map to elements: 0x66 0x42
207 *
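 * A condensed, stand-alone sketch of the insertion steps above, assuming
 * 4-bit groups, a single unsigned long of rule bits per bucket, and leaving
 * out the handling of partially masked groups:
 *
 * ::
 *
 *	#include <stdint.h>
 *
 *	#define GROUPS	8	// e.g. one IPv4 address, 4-bit groups
 *	#define BUCKETS	16
 *
 *	// Set bit 'rule' in every bucket selected by the rule: the single
 *	// bucket matching the key nibble for groups covered by the netmask,
 *	// all buckets for fully masked groups.
 *	void insert_rule(unsigned long lt[GROUPS][BUCKETS], int rule,
 *			 const uint8_t *key, int mask_bits)
 *	{
 *		int g, b;
 *
 *		for (g = 0; g < GROUPS; g++) {
 *			uint8_t v = (g & 1) ? key[g / 2] & 0xf
 *					    : key[g / 2] >> 4;
 *
 *			if (mask_bits >= (g + 1) * 4) {
 *				lt[g][v] |= 1UL << rule;	// not masked
 *			} else if (mask_bits <= g * 4) {
 *				for (b = 0; b < BUCKETS; b++)	// fully masked
 *					lt[g][b] |= 1UL << rule;
 *			}
 *		}
 *	}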
208 *
209 * Matching
210 * --------
211 *
212 * We use a result bitmap, with the size of a single lookup table bucket, to
213 * represent the matching state that applies at every algorithm step. This is
214 * done by pipapo_lookup().
215 *
216 * - For each packet field:
217 *
218 * - start with an all-ones result bitmap (res_map in pipapo_lookup())
219 *
220 * - perform a lookup into the table corresponding to the current field,
221 * for each group, and at every group, AND the current result bitmap with
222 * the value from the lookup table bucket
223 *
224 * ::
225 *
226 * Example: 192.168.1.5 < 12 0 10 8 0 1 0 5 >, with lookup table from
227 * insertion examples.
 * Lookup table buckets are at least 3 bits wide; we'll assume 8 bits for
 * convenience in this example. The initial result bitmap is 0xff, and the
 * steps below show its value after each group is processed:
231 *
232 * bucket
233 * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
234 * 0 0 1,2
235 * result bitmap is now: 0xff & 0x6 [bucket 12] = 0x6
236 *
237 * 1 1,2 0
238 * result bitmap is now: 0x6 & 0x6 [bucket 0] = 0x6
239 *
240 * 2 0 1,2
241 * result bitmap is now: 0x6 & 0x6 [bucket 10] = 0x6
242 *
243 * 3 0 1,2
244 * result bitmap is now: 0x6 & 0x6 [bucket 8] = 0x6
245 *
246 * 4 0,1,2
247 * result bitmap is now: 0x6 & 0x7 [bucket 0] = 0x6
248 *
249 * 5 0 1 2
250 * result bitmap is now: 0x6 & 0x2 [bucket 1] = 0x2
251 *
252 * 6 0,1,2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
253 * result bitmap is now: 0x2 & 0x7 [bucket 0] = 0x2
254 *
255 * 7 1,2 1,2 1 1 1 0,1 1 1 1 1 1 1 1 1 1 1
256 * final result bitmap for this field is: 0x2 & 0x3 [bucket 5] = 0x2
257 *
258 * - at the next field, start with a new, all-zeroes result bitmap. For each
259 * bit set in the previous result bitmap, fill the new result bitmap
260 * (fill_map in pipapo_lookup()) with the rule indices from the
 * corresponding buckets of the mapping table for this field, done by
262 * pipapo_refill()
263 *
264 * Example: with mapping table from insertion examples, with the current
265 * result bitmap from the previous example, 0x02:
266 *
267 * ::
268 *
269 * rule indices in current field: 0 1 2
270 * map to rules in next field: 0 1 1
271 *
 * the new result bitmap will be 0x02: rule 1 was set in the current field,
 * and it maps to rule 1 in the next field, so bit 1 is set there.
274 *
275 * We can now extend this example to cover the second iteration of the step
276 * above (lookup and AND bitmap): assuming the port field is
277 * 2048 < 0 0 5 0 >, with starting result bitmap 0x2, and lookup table
278 * for "port" field from pre-computation example:
279 *
280 * ::
281 *
282 * bucket
283 * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
284 * 0 0,1
285 * 1 0,1
286 * 2 0 1
287 * 3 0,1
288 *
289 * operations are: 0x2 & 0x3 [bucket 0] & 0x3 [bucket 0] & 0x2 [bucket 5]
290 * & 0x3 [bucket 0], resulting bitmap is 0x2.
291 *
292 * - if this is the last field in the set, look up the value from the mapping
293 * array corresponding to the final result bitmap
294 *
295 * Example: 0x2 resulting bitmap from 192.168.1.5:2048, mapping array for
296 * last field from insertion example:
297 *
298 * ::
299 *
300 * rule indices in last field: 0 1
301 * map to elements: 0x66 0x42
302 *
303 * the matching element is at 0x42.
304 *
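 * A minimal, stand-alone sketch of the bitmap translation between fields
 * described above, again assuming that a single unsigned long can hold the
 * rule bitmaps (pipapo_refill() below works on arbitrary-size bitmaps):
 *
 * ::
 *
 *	struct map_bucket {	// first next-field rule and rule count
 *		int to;
 *		int n;
 *	};
 *
 *	unsigned long refill(unsigned long res, const struct map_bucket *mt)
 *	{
 *		unsigned long next = 0;
 *		int i;
 *
 *		for (i = 0; res; i++, res >>= 1)
 *			if (res & 1)	// rule i matched in this field
 *				next |= ((1UL << mt[i].n) - 1) << mt[i].to;
 *
 *		return next;	// initial bitmap for the next field
 *	}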
305 *
306 * References
307 * ----------
308 *
309 * [Ligatti 2010]
310 * A Packet-classification Algorithm for Arbitrary Bitmask Rules, with
311 * Automatic Time-space Tradeoffs
312 * Jay Ligatti, Josh Kuhn, and Chris Gage.
313 * Proceedings of the IEEE International Conference on Computer
314 * Communication Networks (ICCCN), August 2010.
315 * https://www.cse.usf.edu/~ligatti/papers/grouper-conf.pdf
316 *
317 * [Rottenstreich 2010]
318 * Worst-Case TCAM Rule Expansion
319 * Ori Rottenstreich and Isaac Keslassy.
320 * 2010 Proceedings IEEE INFOCOM, San Diego, CA, 2010.
321 * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.212.4592&rep=rep1&type=pdf
322 *
323 * [Kogan 2014]
324 * SAX-PAC (Scalable And eXpressive PAcket Classification)
325 * Kirill Kogan, Sergey Nikolenko, Ori Rottenstreich, William Culhane,
326 * and Patrick Eugster.
327 * Proceedings of the 2014 ACM conference on SIGCOMM, August 2014.
328 * https://www.sigcomm.org/sites/default/files/ccr/papers/2014/August/2619239-2626294.pdf
329 */
330
331 #include <linux/kernel.h>
332 #include <linux/init.h>
333 #include <linux/module.h>
334 #include <linux/netlink.h>
335 #include <linux/netfilter.h>
336 #include <linux/netfilter/nf_tables.h>
337 #include <net/netfilter/nf_tables_core.h>
338 #include <uapi/linux/netfilter/nf_tables.h>
339 #include <linux/bitmap.h>
340 #include <linux/bitops.h>
341
342 #include "nft_set_pipapo_avx2.h"
343 #include "nft_set_pipapo.h"
344
345 /**
346 * pipapo_refill() - For each set bit, set bits from selected mapping table item
347 * @map: Bitmap to be scanned for set bits
348 * @len: Length of bitmap in longs
349 * @rules: Number of rules in field
350 * @dst: Destination bitmap
351 * @mt: Mapping table containing bit set specifiers
352 * @match_only: Find a single bit and return, don't fill
353 *
354 * Iteration over set bits with __builtin_ctzl(): Daniel Lemire, public domain.
355 *
356 * For each bit set in map, select the bucket from mapping table with index
357 * corresponding to the position of the bit set. Use start bit and amount of
358 * bits specified in bucket to fill region in dst.
359 *
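 * Example (illustrative only): with rules = 3, mt = { { .to = 0, .n = 1 },
 * { .to = 1, .n = 2 }, { .to = 3, .n = 1 } } and bits 0 and 2 set in map,
 * bits 0 and 3 get set in dst, and map is zeroed.
 *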
360 * Return: -1 on no match, bit position on 'match_only', 0 otherwise.
361 */
int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
363 const union nft_pipapo_map_bucket *mt, bool match_only)
364 {
365 unsigned long bitset;
366 int k, ret = -1;
367
368 for (k = 0; k < len; k++) {
369 bitset = map[k];
370 while (bitset) {
371 unsigned long t = bitset & -bitset;
372 int r = __builtin_ctzl(bitset);
373 int i = k * BITS_PER_LONG + r;
374
375 if (unlikely(i >= rules)) {
376 map[k] = 0;
377 return -1;
378 }
379
380 if (match_only) {
381 bitmap_clear(map, i, 1);
382 return i;
383 }
384
385 ret = 0;
386
387 bitmap_set(dst, mt[i].to, mt[i].n);
388
389 bitset ^= t;
390 }
391 map[k] = 0;
392 }
393
394 return ret;
395 }
396
397 /**
398 * nft_pipapo_lookup() - Lookup function
399 * @net: Network namespace
400 * @set: nftables API set representation
401 * @key: nftables API element representation containing key data
402 * @ext: nftables API extension pointer, filled with matching reference
403 *
404 * For more details, see DOC: Theory of Operation.
405 *
406 * Return: true on match, false otherwise.
407 */
bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
409 const u32 *key, const struct nft_set_ext **ext)
410 {
411 struct nft_pipapo *priv = nft_set_priv(set);
412 struct nft_pipapo_scratch *scratch;
413 unsigned long *res_map, *fill_map;
414 u8 genmask = nft_genmask_cur(net);
415 const struct nft_pipapo_match *m;
416 const struct nft_pipapo_field *f;
417 const u8 *rp = (const u8 *)key;
418 bool map_index;
419 int i;
420
421 local_bh_disable();
422
423 m = rcu_dereference(priv->match);
424
425 if (unlikely(!m || !*raw_cpu_ptr(m->scratch)))
426 goto out;
427
428 scratch = *raw_cpu_ptr(m->scratch);
429
430 map_index = scratch->map_index;
431
432 res_map = scratch->map + (map_index ? m->bsize_max : 0);
433 fill_map = scratch->map + (map_index ? 0 : m->bsize_max);
434
435 pipapo_resmap_init(m, res_map);
436
437 nft_pipapo_for_each_field(f, i, m) {
438 bool last = i == m->field_count - 1;
439 int b;
440
441 /* For each bit group: select lookup table bucket depending on
442 * packet bytes value, then AND bucket value
443 */
444 if (likely(f->bb == 8))
445 pipapo_and_field_buckets_8bit(f, res_map, rp);
446 else
447 pipapo_and_field_buckets_4bit(f, res_map, rp);
448 NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4;
449
450 rp += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
451
452 /* Now populate the bitmap for the next field, unless this is
453 * the last field, in which case return the matched 'ext'
454 * pointer if any.
455 *
456 * Now res_map contains the matching bitmap, and fill_map is the
457 * bitmap for the next field.
458 */
459 next_match:
460 b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
461 last);
462 if (b < 0) {
463 scratch->map_index = map_index;
464 local_bh_enable();
465
466 return false;
467 }
468
469 if (last) {
470 *ext = &f->mt[b].e->ext;
471 if (unlikely(nft_set_elem_expired(*ext) ||
472 !nft_set_elem_active(*ext, genmask)))
473 goto next_match;
474
475 /* Last field: we're just returning the key without
476 * filling the initial bitmap for the next field, so the
477 * current inactive bitmap is clean and can be reused as
478 * *next* bitmap (not initial) for the next packet.
479 */
480 scratch->map_index = map_index;
481 local_bh_enable();
482
483 return true;
484 }
485
486 /* Swap bitmap indices: res_map is the initial bitmap for the
487 * next field, and fill_map is guaranteed to be all-zeroes at
488 * this point.
489 */
490 map_index = !map_index;
491 swap(res_map, fill_map);
492
493 rp += NFT_PIPAPO_GROUPS_PADDING(f);
494 }
495
496 out:
497 local_bh_enable();
498 return false;
499 }
500
501 /**
502 * pipapo_get() - Get matching element reference given key data
503 * @net: Network namespace
504 * @set: nftables API set representation
505 * @data: Key data to be matched against existing elements
506 * @genmask: If set, check that element is active in given genmask
507 *
508 * This is essentially the same as the lookup function, except that it matches
509 * key data against the uncommitted copy and doesn't use preallocated maps for
510 * bitmap results.
511 *
512 * Return: pointer to &struct nft_pipapo_elem on match, error pointer otherwise.
513 */
static struct nft_pipapo_elem *pipapo_get(const struct net *net,
515 const struct nft_set *set,
516 const u8 *data, u8 genmask)
517 {
518 struct nft_pipapo_elem *ret = ERR_PTR(-ENOENT);
519 struct nft_pipapo *priv = nft_set_priv(set);
520 unsigned long *res_map, *fill_map = NULL;
521 const struct nft_pipapo_match *m;
522 const struct nft_pipapo_field *f;
523 int i;
524
525 m = priv->clone;
526
527 res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), GFP_ATOMIC);
528 if (!res_map) {
529 ret = ERR_PTR(-ENOMEM);
530 goto out;
531 }
532
533 fill_map = kcalloc(m->bsize_max, sizeof(*res_map), GFP_ATOMIC);
534 if (!fill_map) {
535 ret = ERR_PTR(-ENOMEM);
536 goto out;
537 }
538
539 pipapo_resmap_init(m, res_map);
540
541 nft_pipapo_for_each_field(f, i, m) {
542 bool last = i == m->field_count - 1;
543 int b;
544
545 /* For each bit group: select lookup table bucket depending on
546 * packet bytes value, then AND bucket value
547 */
548 if (f->bb == 8)
549 pipapo_and_field_buckets_8bit(f, res_map, data);
550 else if (f->bb == 4)
551 pipapo_and_field_buckets_4bit(f, res_map, data);
552 else
553 BUG();
554
555 data += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
556
557 /* Now populate the bitmap for the next field, unless this is
558 * the last field, in which case return the matched 'ext'
559 * pointer if any.
560 *
561 * Now res_map contains the matching bitmap, and fill_map is the
562 * bitmap for the next field.
563 */
564 next_match:
565 b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
566 last);
567 if (b < 0)
568 goto out;
569
570 if (last) {
571 if (nft_set_elem_expired(&f->mt[b].e->ext))
572 goto next_match;
573 if ((genmask &&
574 !nft_set_elem_active(&f->mt[b].e->ext, genmask)))
575 goto next_match;
576
577 ret = f->mt[b].e;
578 goto out;
579 }
580
581 data += NFT_PIPAPO_GROUPS_PADDING(f);
582
583 /* Swap bitmap indices: fill_map will be the initial bitmap for
584 * the next field (i.e. the new res_map), and res_map is
585 * guaranteed to be all-zeroes at this point, ready to be filled
586 * according to the next mapping table.
587 */
588 swap(res_map, fill_map);
589 }
590
591 out:
592 kfree(fill_map);
593 kfree(res_map);
594 return ret;
595 }
596
597 /**
598 * nft_pipapo_get() - Get matching element reference given key data
599 * @net: Network namespace
600 * @set: nftables API set representation
601 * @elem: nftables API element representation containing key data
602 * @flags: Unused
603 */
static void *nft_pipapo_get(const struct net *net, const struct nft_set *set,
605 const struct nft_set_elem *elem, unsigned int flags)
606 {
607 return pipapo_get(net, set, (const u8 *)elem->key.val.data,
608 nft_genmask_cur(net));
609 }
610
611 /**
612 * pipapo_resize() - Resize lookup or mapping table, or both
613 * @f: Field containing lookup and mapping tables
614 * @old_rules: Previous amount of rules in field
615 * @rules: New amount of rules
616 *
 * Increase, decrease or maintain table sizes depending on new amount of rules,
618 * and copy data over. In case the new size is smaller, throw away data for
619 * highest-numbered rules.
620 *
621 * Return: 0 on success, -ENOMEM on allocation failure.
622 */
static int pipapo_resize(struct nft_pipapo_field *f, int old_rules, int rules)
624 {
625 long *new_lt = NULL, *new_p, *old_lt = f->lt, *old_p;
626 union nft_pipapo_map_bucket *new_mt, *old_mt = f->mt;
627 size_t new_bucket_size, copy;
628 int group, bucket;
629
630 new_bucket_size = DIV_ROUND_UP(rules, BITS_PER_LONG);
631 #ifdef NFT_PIPAPO_ALIGN
632 new_bucket_size = roundup(new_bucket_size,
633 NFT_PIPAPO_ALIGN / sizeof(*new_lt));
634 #endif
635
636 if (new_bucket_size == f->bsize)
637 goto mt;
638
639 if (new_bucket_size > f->bsize)
640 copy = f->bsize;
641 else
642 copy = new_bucket_size;
643
644 new_lt = kvzalloc(f->groups * NFT_PIPAPO_BUCKETS(f->bb) *
645 new_bucket_size * sizeof(*new_lt) +
646 NFT_PIPAPO_ALIGN_HEADROOM,
647 GFP_KERNEL);
648 if (!new_lt)
649 return -ENOMEM;
650
651 new_p = NFT_PIPAPO_LT_ALIGN(new_lt);
652 old_p = NFT_PIPAPO_LT_ALIGN(old_lt);
653
654 for (group = 0; group < f->groups; group++) {
655 for (bucket = 0; bucket < NFT_PIPAPO_BUCKETS(f->bb); bucket++) {
656 memcpy(new_p, old_p, copy * sizeof(*new_p));
657 new_p += copy;
658 old_p += copy;
659
660 if (new_bucket_size > f->bsize)
661 new_p += new_bucket_size - f->bsize;
662 else
663 old_p += f->bsize - new_bucket_size;
664 }
665 }
666
667 mt:
668 new_mt = kvmalloc(rules * sizeof(*new_mt), GFP_KERNEL);
669 if (!new_mt) {
670 kvfree(new_lt);
671 return -ENOMEM;
672 }
673
674 memcpy(new_mt, f->mt, min(old_rules, rules) * sizeof(*new_mt));
675 if (rules > old_rules) {
676 memset(new_mt + old_rules, 0,
677 (rules - old_rules) * sizeof(*new_mt));
678 }
679
680 if (new_lt) {
681 f->bsize = new_bucket_size;
682 NFT_PIPAPO_LT_ASSIGN(f, new_lt);
683 kvfree(old_lt);
684 }
685
686 f->mt = new_mt;
687 kvfree(old_mt);
688
689 return 0;
690 }
691
692 /**
693 * pipapo_bucket_set() - Set rule bit in bucket given group and group value
694 * @f: Field containing lookup table
695 * @rule: Rule index
696 * @group: Group index
697 * @v: Value of bit group
698 */
static void pipapo_bucket_set(struct nft_pipapo_field *f, int rule, int group,
700 int v)
701 {
702 unsigned long *pos;
703
704 pos = NFT_PIPAPO_LT_ALIGN(f->lt);
705 pos += f->bsize * NFT_PIPAPO_BUCKETS(f->bb) * group;
706 pos += f->bsize * v;
707
708 __set_bit(rule, pos);
709 }
710
711 /**
712 * pipapo_lt_4b_to_8b() - Switch lookup table group width from 4 bits to 8 bits
713 * @old_groups: Number of current groups
714 * @bsize: Size of one bucket, in longs
715 * @old_lt: Pointer to the current lookup table
716 * @new_lt: Pointer to the new, pre-allocated lookup table
717 *
 * Each bucket with index b in the new lookup table, belonging to group g, is
 * filled with the bit intersection between:
 * - the bucket with index given by the upper 4 bits of b, from old group
 *   2 * g, and
 * - the bucket with index given by the lower 4 bits of b, from old group
 *   2 * g + 1
 *
 * That is, given buckets from the new lookup table N(x, y) and the old lookup
 * table O(x, y), with x bucket index, and y group index:
 *
 * N(b, g) := O(b / 16, 2 * g) & O(b % 16, 2 * g + 1)
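 *
 * For instance, bucket 0x35 of new group 0 is the intersection of old bucket 3
 * from old group 0 and old bucket 5 from old group 1.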
727 *
728 * This ensures equivalence of the matching results on lookup. Two examples in
729 * pictures:
730 *
731 * bucket
732 * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 ... 254 255
733 * 0 ^
734 * 1 | ^
735 * ... ( & ) |
736 * / \ |
737 * / \ .-( & )-.
738 * / bucket \ | |
739 * group 0 / 1 2 3 \ 4 5 6 7 8 9 10 11 12 13 |14 15 |
740 * 0 / \ | |
741 * 1 \ | |
742 * 2 | --'
743 * 3 '-
744 * ...
745 */
static void pipapo_lt_4b_to_8b(int old_groups, int bsize,
747 unsigned long *old_lt, unsigned long *new_lt)
748 {
749 int g, b, i;
750
751 for (g = 0; g < old_groups / 2; g++) {
752 int src_g0 = g * 2, src_g1 = g * 2 + 1;
753
754 for (b = 0; b < NFT_PIPAPO_BUCKETS(8); b++) {
755 int src_b0 = b / NFT_PIPAPO_BUCKETS(4);
756 int src_b1 = b % NFT_PIPAPO_BUCKETS(4);
757 int src_i0 = src_g0 * NFT_PIPAPO_BUCKETS(4) + src_b0;
758 int src_i1 = src_g1 * NFT_PIPAPO_BUCKETS(4) + src_b1;
759
760 for (i = 0; i < bsize; i++) {
761 *new_lt = old_lt[src_i0 * bsize + i] &
762 old_lt[src_i1 * bsize + i];
763 new_lt++;
764 }
765 }
766 }
767 }
768
769 /**
770 * pipapo_lt_8b_to_4b() - Switch lookup table group width from 8 bits to 4 bits
771 * @old_groups: Number of current groups
772 * @bsize: Size of one bucket, in longs
773 * @old_lt: Pointer to the current lookup table
774 * @new_lt: Pointer to the new, pre-allocated lookup table
775 *
 * Each bucket with index b in the new lookup table, belonging to group g, is
 * filled with the bit union of:
 * - all the buckets of old group g / 2 whose index has the upper four bits
 *   equal to b, if g is even
 * - all the buckets of old group g / 2 whose index has the lower four bits
 *   equal to b, if g is odd
 *
 * That is, given buckets from the new lookup table N(x, y) and the old lookup
 * table O(x, y), with x bucket index, and y group index:
 *
 * - with g even: N(b, g) := U(O(x, g / 2) for each x : (x & 0xf0) >> 4 == b)
 * - with g odd:  N(b, g) := U(O(x, g / 2) for each x : (x & 0x0f) == b)
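 *
 * For instance, bucket 3 of new group 0 is the union of old buckets 0x30-0x3f
 * of old group 0, and bucket 3 of new group 1 is the union of old buckets
 * 0x03, 0x13, ..., 0xf3 of old group 0.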
788 *
789 * where U() denotes the arbitrary union operation (binary OR of n terms). This
790 * ensures equivalence of the matching results on lookup.
791 */
static void pipapo_lt_8b_to_4b(int old_groups, int bsize,
793 unsigned long *old_lt, unsigned long *new_lt)
794 {
795 int g, b, bsrc, i;
796
797 memset(new_lt, 0, old_groups * 2 * NFT_PIPAPO_BUCKETS(4) * bsize *
798 sizeof(unsigned long));
799
800 for (g = 0; g < old_groups * 2; g += 2) {
801 int src_g = g / 2;
802
803 for (b = 0; b < NFT_PIPAPO_BUCKETS(4); b++) {
804 for (bsrc = NFT_PIPAPO_BUCKETS(8) * src_g;
805 bsrc < NFT_PIPAPO_BUCKETS(8) * (src_g + 1);
806 bsrc++) {
807 if (((bsrc & 0xf0) >> 4) != b)
808 continue;
809
810 for (i = 0; i < bsize; i++)
811 new_lt[i] |= old_lt[bsrc * bsize + i];
812 }
813
814 new_lt += bsize;
815 }
816
817 for (b = 0; b < NFT_PIPAPO_BUCKETS(4); b++) {
818 for (bsrc = NFT_PIPAPO_BUCKETS(8) * src_g;
819 bsrc < NFT_PIPAPO_BUCKETS(8) * (src_g + 1);
820 bsrc++) {
821 if ((bsrc & 0x0f) != b)
822 continue;
823
824 for (i = 0; i < bsize; i++)
825 new_lt[i] |= old_lt[bsrc * bsize + i];
826 }
827
828 new_lt += bsize;
829 }
830 }
831 }
832
833 /**
834 * pipapo_lt_bits_adjust() - Adjust group size for lookup table if needed
835 * @f: Field containing lookup table
836 */
static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
838 {
839 unsigned long *new_lt;
840 int groups, bb;
841 size_t lt_size;
842
843 lt_size = f->groups * NFT_PIPAPO_BUCKETS(f->bb) * f->bsize *
844 sizeof(*f->lt);
845
846 if (f->bb == NFT_PIPAPO_GROUP_BITS_SMALL_SET &&
847 lt_size > NFT_PIPAPO_LT_SIZE_HIGH) {
848 groups = f->groups * 2;
849 bb = NFT_PIPAPO_GROUP_BITS_LARGE_SET;
850
851 lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
852 sizeof(*f->lt);
853 } else if (f->bb == NFT_PIPAPO_GROUP_BITS_LARGE_SET &&
854 lt_size < NFT_PIPAPO_LT_SIZE_LOW) {
855 groups = f->groups / 2;
856 bb = NFT_PIPAPO_GROUP_BITS_SMALL_SET;
857
858 lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
859 sizeof(*f->lt);
860
861 /* Don't increase group width if the resulting lookup table size
862 * would exceed the upper size threshold for a "small" set.
863 */
864 if (lt_size > NFT_PIPAPO_LT_SIZE_HIGH)
865 return;
866 } else {
867 return;
868 }
869
870 new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL);
871 if (!new_lt)
872 return;
873
874 NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4;
875 if (f->bb == 4 && bb == 8) {
876 pipapo_lt_4b_to_8b(f->groups, f->bsize,
877 NFT_PIPAPO_LT_ALIGN(f->lt),
878 NFT_PIPAPO_LT_ALIGN(new_lt));
879 } else if (f->bb == 8 && bb == 4) {
880 pipapo_lt_8b_to_4b(f->groups, f->bsize,
881 NFT_PIPAPO_LT_ALIGN(f->lt),
882 NFT_PIPAPO_LT_ALIGN(new_lt));
883 } else {
884 BUG();
885 }
886
887 f->groups = groups;
888 f->bb = bb;
889 kvfree(f->lt);
890 NFT_PIPAPO_LT_ASSIGN(f, new_lt);
891 }
892
893 /**
894 * pipapo_insert() - Insert new rule in field given input key and mask length
895 * @f: Field containing lookup table
896 * @k: Input key for classification, without nftables padding
897 * @mask_bits: Length of mask; matches field length for non-ranged entry
898 *
899 * Insert a new rule reference in lookup buckets corresponding to k and
900 * mask_bits.
901 *
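 * For example, with 4-bit groups, inserting 192.168.2.0/31 in an IPv4 field
 * sets the bit for the new rule in bucket 12 of group 0, bucket 0 of group 1,
 * bucket 10 of group 2, bucket 8 of group 3, bucket 0 of groups 4 and 6,
 * bucket 2 of group 5, and buckets 0 and 1 of group 7.
 *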
902 * Return: 1 on success (one rule inserted), negative error code on failure.
903 */
static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k,
905 int mask_bits)
906 {
907 int rule = f->rules, group, ret, bit_offset = 0;
908
909 ret = pipapo_resize(f, f->rules, f->rules + 1);
910 if (ret)
911 return ret;
912
913 f->rules++;
914
915 for (group = 0; group < f->groups; group++) {
916 int i, v;
917 u8 mask;
918
919 v = k[group / (BITS_PER_BYTE / f->bb)];
920 v &= GENMASK(BITS_PER_BYTE - bit_offset - 1, 0);
921 v >>= (BITS_PER_BYTE - bit_offset) - f->bb;
922
923 bit_offset += f->bb;
924 bit_offset %= BITS_PER_BYTE;
925
926 if (mask_bits >= (group + 1) * f->bb) {
927 /* Not masked */
928 pipapo_bucket_set(f, rule, group, v);
929 } else if (mask_bits <= group * f->bb) {
930 /* Completely masked */
931 for (i = 0; i < NFT_PIPAPO_BUCKETS(f->bb); i++)
932 pipapo_bucket_set(f, rule, group, i);
933 } else {
934 /* The mask limit falls on this group */
935 mask = GENMASK(f->bb - 1, 0);
936 mask >>= mask_bits - group * f->bb;
937 for (i = 0; i < NFT_PIPAPO_BUCKETS(f->bb); i++) {
938 if ((i & ~mask) == (v & ~mask))
939 pipapo_bucket_set(f, rule, group, i);
940 }
941 }
942 }
943
944 pipapo_lt_bits_adjust(f);
945
946 return 1;
947 }
948
949 /**
950 * pipapo_step_diff() - Check if setting @step bit in netmask would change it
951 * @base: Mask we are expanding
952 * @step: Step bit for given expansion step
953 * @len: Total length of mask space (set and unset bits), bytes
954 *
955 * Convenience function for mask expansion.
956 *
957 * Return: true if step bit changes mask (i.e. isn't set), false otherwise.
958 */
static bool pipapo_step_diff(u8 *base, int step, int len)
960 {
961 /* Network order, byte-addressed */
962 #ifdef __BIG_ENDIAN__
963 return !(BIT(step % BITS_PER_BYTE) & base[step / BITS_PER_BYTE]);
964 #else
965 return !(BIT(step % BITS_PER_BYTE) &
966 base[len - 1 - step / BITS_PER_BYTE]);
967 #endif
968 }
969
970 /**
971 * pipapo_step_after_end() - Check if mask exceeds range end with given step
972 * @base: Mask we are expanding
973 * @end: End of range
974 * @step: Step bit for given expansion step, highest bit to be set
975 * @len: Total length of mask space (set and unset bits), bytes
976 *
977 * Convenience function for mask expansion.
978 *
979 * Return: true if mask exceeds range setting step bits, false otherwise.
980 */
static bool pipapo_step_after_end(const u8 *base, const u8 *end, int step,
982 int len)
983 {
984 u8 tmp[NFT_PIPAPO_MAX_BYTES];
985 int i;
986
987 memcpy(tmp, base, len);
988
989 /* Network order, byte-addressed */
990 for (i = 0; i <= step; i++)
991 #ifdef __BIG_ENDIAN__
992 tmp[i / BITS_PER_BYTE] |= BIT(i % BITS_PER_BYTE);
993 #else
994 tmp[len - 1 - i / BITS_PER_BYTE] |= BIT(i % BITS_PER_BYTE);
995 #endif
996
997 return memcmp(tmp, end, len) > 0;
998 }
999
1000 /**
1001 * pipapo_base_sum() - Sum step bit to given len-sized netmask base with carry
1002 * @base: Netmask base
1003 * @step: Step bit to sum
1004 * @len: Netmask length, bytes
1005 */
static void pipapo_base_sum(u8 *base, int step, int len)
1007 {
1008 bool carry = false;
1009 int i;
1010
1011 /* Network order, byte-addressed */
1012 #ifdef __BIG_ENDIAN__
1013 for (i = step / BITS_PER_BYTE; i < len; i++) {
1014 #else
1015 for (i = len - 1 - step / BITS_PER_BYTE; i >= 0; i--) {
1016 #endif
1017 if (carry)
1018 base[i]++;
1019 else
1020 base[i] += 1 << (step % BITS_PER_BYTE);
1021
1022 if (base[i])
1023 break;
1024
1025 carry = true;
1026 }
1027 }
1028
1029 /**
1030 * pipapo_expand() - Expand to composing netmasks, insert into lookup table
1031 * @f: Field containing lookup table
1032 * @start: Start of range
1033 * @end: End of range
1034 * @len: Length of value in bits
1035 *
1036 * Expand range to composing netmasks and insert corresponding rule references
1037 * in lookup buckets.
1038 *
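 * For example, with len = 8, the range 0x01-0x06 is expanded, in this order,
 * into the netmasks 0x01/8, 0x02/7, 0x04/7 and 0x06/8, and 4 is returned.
 *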
1039 * Return: number of inserted rules on success, negative error code on failure.
1040 */
1041 static int pipapo_expand(struct nft_pipapo_field *f,
1042 const u8 *start, const u8 *end, int len)
1043 {
1044 int step, masks = 0, bytes = DIV_ROUND_UP(len, BITS_PER_BYTE);
1045 u8 base[NFT_PIPAPO_MAX_BYTES];
1046
1047 memcpy(base, start, bytes);
1048 while (memcmp(base, end, bytes) <= 0) {
1049 int err;
1050
1051 step = 0;
1052 while (pipapo_step_diff(base, step, bytes)) {
1053 if (pipapo_step_after_end(base, end, step, bytes))
1054 break;
1055
1056 step++;
1057 if (step >= len) {
1058 if (!masks) {
1059 err = pipapo_insert(f, base, 0);
1060 if (err < 0)
1061 return err;
1062 masks = 1;
1063 }
1064 goto out;
1065 }
1066 }
1067
1068 err = pipapo_insert(f, base, len - step);
1069
1070 if (err < 0)
1071 return err;
1072
1073 masks++;
1074 pipapo_base_sum(base, step, bytes);
1075 }
1076 out:
1077 return masks;
1078 }
1079
1080 /**
1081 * pipapo_map() - Insert rules in mapping tables, mapping them between fields
1082 * @m: Matching data, including mapping table
1083 * @map: Table of rule maps: array of first rule and amount of rules
1084 * in next field a given rule maps to, for each field
1085 * @e: For last field, nft_set_ext pointer matching rules map to
1086 */
1087 static void pipapo_map(struct nft_pipapo_match *m,
1088 union nft_pipapo_map_bucket map[NFT_PIPAPO_MAX_FIELDS],
1089 struct nft_pipapo_elem *e)
1090 {
1091 struct nft_pipapo_field *f;
1092 int i, j;
1093
1094 for (i = 0, f = m->f; i < m->field_count - 1; i++, f++) {
1095 for (j = 0; j < map[i].n; j++) {
1096 f->mt[map[i].to + j].to = map[i + 1].to;
1097 f->mt[map[i].to + j].n = map[i + 1].n;
1098 }
1099 }
1100
1101 /* Last field: map to ext instead of mapping to next field */
1102 for (j = 0; j < map[i].n; j++)
1103 f->mt[map[i].to + j].e = e;
1104 }
1105
1106 /**
1107 * pipapo_free_scratch() - Free per-CPU map at original (not aligned) address
1108 * @m: Matching data
1109 * @cpu: CPU number
1110 */
1111 static void pipapo_free_scratch(const struct nft_pipapo_match *m, unsigned int cpu)
1112 {
1113 struct nft_pipapo_scratch *s;
1114 void *mem;
1115
1116 s = *per_cpu_ptr(m->scratch, cpu);
1117 if (!s)
1118 return;
1119
1120 mem = s;
1121 mem -= s->align_off;
1122 kfree(mem);
1123 }
1124
1125 /**
1126 * pipapo_realloc_scratch() - Reallocate scratch maps for partial match results
1127 * @clone: Copy of matching data with pending insertions and deletions
1128 * @bsize_max: Maximum bucket size, scratch maps cover two buckets
1129 *
1130 * Return: 0 on success, -ENOMEM on failure.
1131 */
1132 static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
1133 unsigned long bsize_max)
1134 {
1135 int i;
1136
1137 for_each_possible_cpu(i) {
1138 struct nft_pipapo_scratch *scratch;
1139 #ifdef NFT_PIPAPO_ALIGN
1140 void *scratch_aligned;
1141 u32 align_off;
1142 #endif
1143 scratch = kzalloc_node(struct_size(scratch, map,
1144 bsize_max * 2) +
1145 NFT_PIPAPO_ALIGN_HEADROOM,
1146 GFP_KERNEL, cpu_to_node(i));
1147 if (!scratch) {
1148 /* On failure, there's no need to undo previous
1149 * allocations: this means that some scratch maps have
1150 * a bigger allocated size now (this is only called on
1151 * insertion), but the extra space won't be used by any
1152 * CPU as new elements are not inserted and m->bsize_max
1153 * is not updated.
1154 */
1155 return -ENOMEM;
1156 }
1157
1158 pipapo_free_scratch(clone, i);
1159
1160 #ifdef NFT_PIPAPO_ALIGN
1161 /* Align &scratch->map (not the struct itself): the extra
1162 * %NFT_PIPAPO_ALIGN_HEADROOM bytes passed to kzalloc_node()
1163 * above guarantee we can waste up to those bytes in order
1164 * to align the map field regardless of its offset within
1165 * the struct.
1166 */
1167 BUILD_BUG_ON(offsetof(struct nft_pipapo_scratch, map) > NFT_PIPAPO_ALIGN_HEADROOM);
1168
1169 scratch_aligned = NFT_PIPAPO_LT_ALIGN(&scratch->map);
1170 scratch_aligned -= offsetof(struct nft_pipapo_scratch, map);
1171 align_off = scratch_aligned - (void *)scratch;
1172
1173 scratch = scratch_aligned;
1174 scratch->align_off = align_off;
1175 #endif
1176 *per_cpu_ptr(clone->scratch, i) = scratch;
1177 }
1178
1179 return 0;
1180 }
1181
1182 /**
1183 * nft_pipapo_insert() - Validate and insert ranged elements
1184 * @net: Network namespace
1185 * @set: nftables API set representation
1186 * @elem: nftables API element representation containing key data
1187 * @ext2: Filled with pointer to &struct nft_set_ext in inserted element
1188 *
 * Return: 0 on success, negative error code on failure.
1190 */
1191 static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
1192 const struct nft_set_elem *elem,
1193 struct nft_set_ext **ext2)
1194 {
1195 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
1196 union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
1197 const u8 *start = (const u8 *)elem->key.val.data, *end;
1198 struct nft_pipapo_elem *e = elem->priv, *dup;
1199 struct nft_pipapo *priv = nft_set_priv(set);
1200 struct nft_pipapo_match *m = priv->clone;
1201 u8 genmask = nft_genmask_next(net);
1202 struct nft_pipapo_field *f;
1203 const u8 *start_p, *end_p;
1204 int i, bsize_max, err = 0;
1205
1206 if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END))
1207 end = (const u8 *)nft_set_ext_key_end(ext)->data;
1208 else
1209 end = start;
1210
1211 dup = pipapo_get(net, set, start, genmask);
1212 if (!IS_ERR(dup)) {
1213 /* Check if we already have the same exact entry */
1214 const struct nft_data *dup_key, *dup_end;
1215
1216 dup_key = nft_set_ext_key(&dup->ext);
1217 if (nft_set_ext_exists(&dup->ext, NFT_SET_EXT_KEY_END))
1218 dup_end = nft_set_ext_key_end(&dup->ext);
1219 else
1220 dup_end = dup_key;
1221
1222 if (!memcmp(start, dup_key->data, sizeof(*dup_key->data)) &&
1223 !memcmp(end, dup_end->data, sizeof(*dup_end->data))) {
1224 *ext2 = &dup->ext;
1225 return -EEXIST;
1226 }
1227
1228 return -ENOTEMPTY;
1229 }
1230
1231 if (PTR_ERR(dup) == -ENOENT) {
1232 /* Look for partially overlapping entries */
1233 dup = pipapo_get(net, set, end, nft_genmask_next(net));
1234 }
1235
1236 if (PTR_ERR(dup) != -ENOENT) {
1237 if (IS_ERR(dup))
1238 return PTR_ERR(dup);
1239 *ext2 = &dup->ext;
1240 return -ENOTEMPTY;
1241 }
1242
1243 /* Validate */
1244 start_p = start;
1245 end_p = end;
1246 nft_pipapo_for_each_field(f, i, m) {
1247 if (f->rules >= (unsigned long)NFT_PIPAPO_RULE0_MAX)
1248 return -ENOSPC;
1249
1250 if (memcmp(start_p, end_p,
1251 f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f)) > 0)
1252 return -EINVAL;
1253
1254 start_p += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
1255 end_p += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
1256 }
1257
1258 /* Insert */
1259 priv->dirty = true;
1260
1261 bsize_max = m->bsize_max;
1262
1263 nft_pipapo_for_each_field(f, i, m) {
1264 int ret;
1265
1266 rulemap[i].to = f->rules;
1267
1268 ret = memcmp(start, end,
1269 f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f));
1270 if (!ret)
1271 ret = pipapo_insert(f, start, f->groups * f->bb);
1272 else
1273 ret = pipapo_expand(f, start, end, f->groups * f->bb);
1274
1275 if (ret < 0)
1276 return ret;
1277
1278 if (f->bsize > bsize_max)
1279 bsize_max = f->bsize;
1280
1281 rulemap[i].n = ret;
1282
1283 start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
1284 end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
1285 }
1286
1287 if (!*get_cpu_ptr(m->scratch) || bsize_max > m->bsize_max) {
1288 put_cpu_ptr(m->scratch);
1289
1290 err = pipapo_realloc_scratch(m, bsize_max);
1291 if (err)
1292 return err;
1293
1294 m->bsize_max = bsize_max;
1295 } else {
1296 put_cpu_ptr(m->scratch);
1297 }
1298
1299 *ext2 = &e->ext;
1300
1301 pipapo_map(m, rulemap, e);
1302
1303 return 0;
1304 }
1305
1306 /**
1307 * pipapo_clone() - Clone matching data to create new working copy
1308 * @old: Existing matching data
1309 *
1310 * Return: copy of matching data passed as 'old', error pointer on failure
1311 */
1312 static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
1313 {
1314 struct nft_pipapo_field *dst, *src;
1315 struct nft_pipapo_match *new;
1316 int i;
1317
1318 new = kmalloc(struct_size(new, f, old->field_count), GFP_KERNEL);
1319 if (!new)
1320 return ERR_PTR(-ENOMEM);
1321
1322 new->field_count = old->field_count;
1323 new->bsize_max = old->bsize_max;
1324
1325 new->scratch = alloc_percpu(*new->scratch);
1326 if (!new->scratch)
1327 goto out_scratch;
1328
1329 for_each_possible_cpu(i)
1330 *per_cpu_ptr(new->scratch, i) = NULL;
1331
1332 if (pipapo_realloc_scratch(new, old->bsize_max))
1333 goto out_scratch_realloc;
1334
1335 rcu_head_init(&new->rcu);
1336
1337 src = old->f;
1338 dst = new->f;
1339
1340 for (i = 0; i < old->field_count; i++) {
1341 unsigned long *new_lt;
1342
1343 memcpy(dst, src, offsetof(struct nft_pipapo_field, lt));
1344
1345 new_lt = kvzalloc(src->groups * NFT_PIPAPO_BUCKETS(src->bb) *
1346 src->bsize * sizeof(*dst->lt) +
1347 NFT_PIPAPO_ALIGN_HEADROOM,
1348 GFP_KERNEL);
1349 if (!new_lt)
1350 goto out_lt;
1351
1352 NFT_PIPAPO_LT_ASSIGN(dst, new_lt);
1353
1354 memcpy(NFT_PIPAPO_LT_ALIGN(new_lt),
1355 NFT_PIPAPO_LT_ALIGN(src->lt),
1356 src->bsize * sizeof(*dst->lt) *
1357 src->groups * NFT_PIPAPO_BUCKETS(src->bb));
1358
1359 dst->mt = kvmalloc(src->rules * sizeof(*src->mt), GFP_KERNEL);
1360 if (!dst->mt)
1361 goto out_mt;
1362
1363 memcpy(dst->mt, src->mt, src->rules * sizeof(*src->mt));
1364 src++;
1365 dst++;
1366 }
1367
1368 return new;
1369
1370 out_mt:
1371 kvfree(dst->lt);
1372 out_lt:
1373 for (dst--; i > 0; i--) {
1374 kvfree(dst->mt);
1375 kvfree(dst->lt);
1376 dst--;
1377 }
1378 out_scratch_realloc:
1379 for_each_possible_cpu(i)
1380 pipapo_free_scratch(new, i);
1381 out_scratch:
1382 free_percpu(new->scratch);
1383 kfree(new);
1384
1385 return ERR_PTR(-ENOMEM);
1386 }
1387
1388 /**
1389 * pipapo_rules_same_key() - Get number of rules originated from the same entry
1390 * @f: Field containing mapping table
1391 * @first: Index of first rule in set of rules mapping to same entry
1392 *
1393 * Using the fact that all rules in a field that originated from the same entry
1394 * will map to the same set of rules in the next field, or to the same element
1395 * reference, return the cardinality of the set of rules that originated from
1396 * the same entry as the rule with index @first, @first rule included.
1397 *
1398 * In pictures:
1399 * rules
1400 * field #0 0 1 2 3 4
1401 * map to: 0 1 2-4 2-4 5-9
1402 * . . ....... . ...
1403 * | | | | \ \
1404 * | | | | \ \
1405 * | | | | \ \
1406 * ' ' ' ' ' \
1407 * in field #1 0 1 2 3 4 5 ...
1408 *
 * if this is called for rule 2 on field #0, it will return 2, as rules 2
 * and 3 in field 0 map to the same set of rules (2, 3, 4) in the next field.
1411 *
1412 * For the last field in a set, we can rely on associated entries to map to the
1413 * same element references.
1414 *
1415 * Return: Number of rules that originated from the same entry as @first.
1416 */
1417 static int pipapo_rules_same_key(struct nft_pipapo_field *f, int first)
1418 {
1419 struct nft_pipapo_elem *e = NULL; /* Keep gcc happy */
1420 int r;
1421
1422 for (r = first; r < f->rules; r++) {
1423 if (r != first && e != f->mt[r].e)
1424 return r - first;
1425
1426 e = f->mt[r].e;
1427 }
1428
1429 if (r != first)
1430 return r - first;
1431
1432 return 0;
1433 }
1434
1435 /**
1436 * pipapo_unmap() - Remove rules from mapping tables, renumber remaining ones
1437 * @mt: Mapping array
1438 * @rules: Original amount of rules in mapping table
1439 * @start: First rule index to be removed
1440 * @n: Amount of rules to be removed
1441 * @to_offset: First rule index, in next field, this group of rules maps to
1442 * @is_last: If this is the last field, delete reference from mapping array
1443 *
1444 * This is used to unmap rules from the mapping table for a single field,
1445 * maintaining consistency and compactness for the existing ones.
1446 *
1447 * In pictures: let's assume that we want to delete rules 2 and 3 from the
1448 * following mapping array:
1449 *
1450 * rules
1451 * 0 1 2 3 4
1452 * map to: 4-10 4-10 11-15 11-15 16-18
1453 *
1454 * the result will be:
1455 *
1456 * rules
1457 * 0 1 2
1458 * map to: 4-10 4-10 11-13
1459 *
1460 * for fields before the last one. In case this is the mapping table for the
1461 * last field in a set, and rules map to pointers to &struct nft_pipapo_elem:
1462 *
1463 * rules
1464 * 0 1 2 3 4
1465 * element pointers: 0x42 0x42 0x33 0x33 0x44
1466 *
1467 * the result will be:
1468 *
1469 * rules
1470 * 0 1 2
1471 * element pointers: 0x42 0x42 0x44
1472 */
1473 static void pipapo_unmap(union nft_pipapo_map_bucket *mt, int rules,
1474 int start, int n, int to_offset, bool is_last)
1475 {
1476 int i;
1477
1478 memmove(mt + start, mt + start + n, (rules - start - n) * sizeof(*mt));
1479 memset(mt + rules - n, 0, n * sizeof(*mt));
1480
1481 if (is_last)
1482 return;
1483
1484 for (i = start; i < rules - n; i++)
1485 mt[i].to -= to_offset;
1486 }
1487
1488 /**
1489 * pipapo_drop() - Delete entry from lookup and mapping tables, given rule map
1490 * @m: Matching data
1491 * @rulemap: Table of rule maps, arrays of first rule and amount of rules
1492 * in next field a given entry maps to, for each field
1493 *
1494 * For each rule in lookup table buckets mapping to this set of rules, drop
1495 * all bits set in lookup table mapping. In pictures, assuming we want to drop
1496 * rules 0 and 1 from this lookup table:
1497 *
1498 * bucket
1499 * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1500 * 0 0 1,2
1501 * 1 1,2 0
1502 * 2 0 1,2
1503 * 3 0 1,2
1504 * 4 0,1,2
1505 * 5 0 1 2
1506 * 6 0,1,2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1507 * 7 1,2 1,2 1 1 1 0,1 1 1 1 1 1 1 1 1 1 1
1508 *
1509 * rule 2 becomes rule 0, and the result will be:
1510 *
1511 * bucket
1512 * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1513 * 0 0
1514 * 1 0
1515 * 2 0
1516 * 3 0
1517 * 4 0
1518 * 5 0
1519 * 6 0
1520 * 7 0 0
1521 *
 * once this is done, call pipapo_unmap() to drop all the corresponding rule
 * references from mapping tables.
1524 */
1525 static void pipapo_drop(struct nft_pipapo_match *m,
1526 union nft_pipapo_map_bucket rulemap[])
1527 {
1528 struct nft_pipapo_field *f;
1529 int i;
1530
1531 nft_pipapo_for_each_field(f, i, m) {
1532 int g;
1533
1534 for (g = 0; g < f->groups; g++) {
1535 unsigned long *pos;
1536 int b;
1537
1538 pos = NFT_PIPAPO_LT_ALIGN(f->lt) + g *
1539 NFT_PIPAPO_BUCKETS(f->bb) * f->bsize;
1540
1541 for (b = 0; b < NFT_PIPAPO_BUCKETS(f->bb); b++) {
1542 bitmap_cut(pos, pos, rulemap[i].to,
1543 rulemap[i].n,
1544 f->bsize * BITS_PER_LONG);
1545
1546 pos += f->bsize;
1547 }
1548 }
1549
1550 pipapo_unmap(f->mt, f->rules, rulemap[i].to, rulemap[i].n,
1551 rulemap[i + 1].n, i == m->field_count - 1);
1552 if (pipapo_resize(f, f->rules, f->rules - rulemap[i].n)) {
1553 /* We can ignore this, a failure to shrink tables down
1554 * doesn't make tables invalid.
1555 */
1556 ;
1557 }
1558 f->rules -= rulemap[i].n;
1559
1560 pipapo_lt_bits_adjust(f);
1561 }
1562 }
1563
1564 static void nft_pipapo_gc_deactivate(struct net *net, struct nft_set *set,
1565 struct nft_pipapo_elem *e)
1566
1567 {
1568 struct nft_set_elem elem = {
1569 .priv = e,
1570 };
1571
1572 nft_setelem_data_deactivate(net, set, &elem);
1573 }
1574
1575 /**
1576 * pipapo_gc() - Drop expired entries from set, destroy start and end elements
1577 * @_set: nftables API set representation
1578 * @m: Matching data
1579 */
1580 static void pipapo_gc(const struct nft_set *_set, struct nft_pipapo_match *m)
1581 {
1582 struct nft_set *set = (struct nft_set *) _set;
1583 struct nft_pipapo *priv = nft_set_priv(set);
1584 struct net *net = read_pnet(&set->net);
1585 int rules_f0, first_rule = 0;
1586 struct nft_pipapo_elem *e;
1587 struct nft_trans_gc *gc;
1588
1589 gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
1590 if (!gc)
1591 return;
1592
1593 while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
1594 union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
1595 const struct nft_pipapo_field *f;
1596 int i, start, rules_fx;
1597
1598 start = first_rule;
1599 rules_fx = rules_f0;
1600
1601 nft_pipapo_for_each_field(f, i, m) {
1602 rulemap[i].to = start;
1603 rulemap[i].n = rules_fx;
1604
1605 if (i < m->field_count - 1) {
1606 rules_fx = f->mt[start].n;
1607 start = f->mt[start].to;
1608 }
1609 }
1610
1611 /* Pick the last field, and its last index */
1612 f--;
1613 i--;
1614 e = f->mt[rulemap[i].to].e;
1615
1616 /* synchronous gc never fails, there is no need to set on
1617 * NFT_SET_ELEM_DEAD_BIT.
1618 */
1619 if (nft_set_elem_expired(&e->ext)) {
1620 priv->dirty = true;
1621
1622 gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
1623 if (!gc)
1624 return;
1625
1626 nft_pipapo_gc_deactivate(net, set, e);
1627 pipapo_drop(m, rulemap);
1628 nft_trans_gc_elem_add(gc, e);
1629
1630 /* And check again current first rule, which is now the
1631 * first we haven't checked.
1632 */
1633 } else {
1634 first_rule += rules_f0;
1635 }
1636 }
1637
1638 gc = nft_trans_gc_catchall_sync(gc);
1639 if (gc) {
1640 nft_trans_gc_queue_sync_done(gc);
1641 priv->last_gc = jiffies;
1642 }
1643 }
1644
1645 /**
1646 * pipapo_free_fields() - Free per-field tables contained in matching data
1647 * @m: Matching data
1648 */
1649 static void pipapo_free_fields(struct nft_pipapo_match *m)
1650 {
1651 struct nft_pipapo_field *f;
1652 int i;
1653
1654 nft_pipapo_for_each_field(f, i, m) {
1655 kvfree(f->lt);
1656 kvfree(f->mt);
1657 }
1658 }
1659
1660 static void pipapo_free_match(struct nft_pipapo_match *m)
1661 {
1662 int i;
1663
1664 for_each_possible_cpu(i)
1665 pipapo_free_scratch(m, i);
1666
1667 free_percpu(m->scratch);
1668 pipapo_free_fields(m);
1669
1670 kfree(m);
1671 }
1672
1673 /**
1674 * pipapo_reclaim_match - RCU callback to free fields from old matching data
1675 * @rcu: RCU head
1676 */
1677 static void pipapo_reclaim_match(struct rcu_head *rcu)
1678 {
1679 struct nft_pipapo_match *m;
1680
1681 m = container_of(rcu, struct nft_pipapo_match, rcu);
1682 pipapo_free_match(m);
1683 }
1684
1685 /**
1686 * nft_pipapo_commit() - Replace lookup data with current working copy
1687 * @set: nftables API set representation
1688 *
1689 * While at it, check if we should perform garbage collection on the working
1690 * copy before committing it for lookup, and don't replace the table if the
1691 * working copy doesn't have pending changes.
1692 *
1693 * We also need to create a new working copy for subsequent insertions and
1694 * deletions.
1695 */
1696 static void nft_pipapo_commit(const struct nft_set *set)
1697 {
1698 struct nft_pipapo *priv = nft_set_priv(set);
1699 struct nft_pipapo_match *new_clone, *old;
1700
1701 if (time_after_eq(jiffies, priv->last_gc + nft_set_gc_interval(set)))
1702 pipapo_gc(set, priv->clone);
1703
1704 if (!priv->dirty)
1705 return;
1706
1707 new_clone = pipapo_clone(priv->clone);
1708 if (IS_ERR(new_clone))
1709 return;
1710
1711 priv->dirty = false;
1712
1713 old = rcu_access_pointer(priv->match);
1714 rcu_assign_pointer(priv->match, priv->clone);
1715 if (old)
1716 call_rcu(&old->rcu, pipapo_reclaim_match);
1717
1718 priv->clone = new_clone;
1719 }
1720
1721 static bool nft_pipapo_transaction_mutex_held(const struct nft_set *set)
1722 {
1723 #ifdef CONFIG_PROVE_LOCKING
1724 const struct net *net = read_pnet(&set->net);
1725
1726 return lockdep_is_held(&nft_pernet(net)->commit_mutex);
1727 #else
1728 return true;
1729 #endif
1730 }
1731
1732 static void nft_pipapo_abort(const struct nft_set *set)
1733 {
1734 struct nft_pipapo *priv = nft_set_priv(set);
1735 struct nft_pipapo_match *new_clone, *m;
1736
1737 if (!priv->dirty)
1738 return;
1739
1740 m = rcu_dereference_protected(priv->match, nft_pipapo_transaction_mutex_held(set));
1741
1742 new_clone = pipapo_clone(m);
1743 if (IS_ERR(new_clone))
1744 return;
1745
1746 priv->dirty = false;
1747
1748 pipapo_free_match(priv->clone);
1749 priv->clone = new_clone;
1750 }
1751
1752 /**
1753 * nft_pipapo_activate() - Mark element reference as active given key, commit
1754 * @net: Network namespace
1755 * @set: nftables API set representation
1756 * @elem: nftables API element representation containing key data
1757 *
1758 * On insertion, elements are added to a copy of the matching data currently
1759 * in use for lookups, and not directly inserted into current lookup data. Both
1760 * nft_pipapo_insert() and nft_pipapo_activate() are called once for each
1761 * element, hence we can't purpose either one as a real commit operation.
1762 */
1763 static void nft_pipapo_activate(const struct net *net,
1764 const struct nft_set *set,
1765 const struct nft_set_elem *elem)
1766 {
1767 struct nft_pipapo_elem *e = elem->priv;
1768
1769 nft_clear(net, &e->ext);
1770 }
1771
1772 /**
1773 * pipapo_deactivate() - Check that element is in set, mark as inactive
1774 * @net: Network namespace
1775 * @set: nftables API set representation
1776 * @data: Input key data
1777 * @ext: nftables API extension pointer, used to check for end element
1778 *
1779 * This is a convenience function that can be called from both
1780 * nft_pipapo_deactivate() and nft_pipapo_flush(), as they are in fact the same
1781 * operation.
1782 *
1783 * Return: deactivated element if found, NULL otherwise.
1784 */
1785 static void *pipapo_deactivate(const struct net *net, const struct nft_set *set,
1786 const u8 *data, const struct nft_set_ext *ext)
1787 {
1788 struct nft_pipapo_elem *e;
1789
1790 e = pipapo_get(net, set, data, nft_genmask_next(net));
1791 if (IS_ERR(e))
1792 return NULL;
1793
1794 nft_set_elem_change_active(net, set, &e->ext);
1795
1796 return e;
1797 }
1798
1799 /**
1800 * nft_pipapo_deactivate() - Call pipapo_deactivate() to make element inactive
1801 * @net: Network namespace
1802 * @set: nftables API set representation
1803 * @elem: nftables API element representation containing key data
1804 *
1805 * Return: deactivated element if found, NULL otherwise.
1806 */
1807 static void *nft_pipapo_deactivate(const struct net *net,
1808 const struct nft_set *set,
1809 const struct nft_set_elem *elem)
1810 {
1811 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
1812
1813 return pipapo_deactivate(net, set, (const u8 *)elem->key.val.data, ext);
1814 }
1815
1816 /**
1817 * nft_pipapo_flush() - Call pipapo_deactivate() to make element inactive
1818 * @net: Network namespace
1819 * @set: nftables API set representation
1820 * @elem: nftables API element representation containing key data
1821 *
1822 * This is functionally the same as nft_pipapo_deactivate(), with a slightly
1823 * different interface, and it's also called once for each element in a set
1824 * being flushed, so we can't implement, strictly speaking, a flush operation,
1825 * which would otherwise be as simple as allocating an empty copy of the
1826 * matching data.
1827 *
1828 * Note that we could in theory do that, mark the set as flushed, and ignore
1829 * subsequent calls, but we would leak all the elements after the first one,
1830 * because they wouldn't then be freed as result of API calls.
1831 *
1832 * Return: true if element was found and deactivated.
1833 */
1834 static bool nft_pipapo_flush(const struct net *net, const struct nft_set *set,
1835 void *elem)
1836 {
1837 struct nft_pipapo_elem *e = elem;
1838
1839 return pipapo_deactivate(net, set, (const u8 *)nft_set_ext_key(&e->ext),
1840 &e->ext);
1841 }
1842
1843 /**
1844 * pipapo_get_boundaries() - Get byte interval for associated rules
1845 * @f: Field including lookup table
1846 * @first_rule: First rule (lowest index)
1847 * @rule_count: Number of associated rules
1848 * @left: Byte expression for left boundary (start of range)
1849 * @right: Byte expression for right boundary (end of range)
1850 *
1851 * Given the first rule and amount of rules that originated from the same entry,
1852 * build the original range associated with the entry, and calculate the length
1853 * of the originating netmask.
1854 *
1855 * In pictures:
1856 *
 *          bucket
 *   group  0   1   2   3   4   5   6   7   8   9   10  11  12  13  14  15
 *       0                                                  1,2
 *       1  1,2
 *       2                                          1,2
 *       3                                  1,2
 *       4  1,2
 *       5      1   2
 *       6  1,2 1   1   1   1   1   1   1   1   1   1   1   1   1   1   1
 *       7  1,2 1,2 1   1   1   1   1   1   1   1   1   1   1   1   1   1
1867 *
1868 * this is the lookup table corresponding to the IPv4 range
1869 * 192.168.1.0-192.168.2.1, which was expanded to the two composing netmasks,
1870 * rule #1: 192.168.1.0/24, and rule #2: 192.168.2.0/31.
1871 *
1872 * This function fills @left and @right with the byte values of the leftmost
1873 * and rightmost bucket indices for the lowest and highest rule indices,
1874 * respectively. If @first_rule is 1 and @rule_count is 2, we obtain, in
1875 * nibbles:
 *   left:  < 12, 0, 10, 8, 0, 1, 0, 0 >
 *   right: < 12, 0, 10, 8, 0, 2, 0, 1 >
 * corresponding to bytes:
 *   left:  < 192, 168, 1, 0 >
 *   right: < 192, 168, 2, 1 >
 * with the mask length irrelevant here, and unused on return, as the range is
 * already fully defined by its start and end points. The mask length matters
 * instead when the entry is composed of a single netmask, that is, a single
 * rule: if @first_rule is 1 and @rule_count is 1, we ignore rule 2 above,
 * @left becomes < 192, 168, 1, 0 >, @right becomes < 192, 168, 1, 255 >, and
 * the mask length, calculated from the distances between leftmost and
 * rightmost bucket indices for each group, is 24.
1887 *
1888 * Return: mask length, in bits.
1889 */
1890 static int pipapo_get_boundaries(struct nft_pipapo_field *f, int first_rule,
1891 int rule_count, u8 *left, u8 *right)
1892 {
1893 int g, mask_len = 0, bit_offset = 0;
1894 u8 *l = left, *r = right;
1895
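	/* For each group, find the leftmost bucket matching the first rule
	 * (x0) and the rightmost bucket matching the last rule (x1), and pack
	 * the bucket indices back into the boundary byte arrays.
	 */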
1896 for (g = 0; g < f->groups; g++) {
1897 int b, x0, x1;
1898
1899 x0 = -1;
1900 x1 = -1;
1901 for (b = 0; b < NFT_PIPAPO_BUCKETS(f->bb); b++) {
1902 unsigned long *pos;
1903
1904 pos = NFT_PIPAPO_LT_ALIGN(f->lt) +
1905 (g * NFT_PIPAPO_BUCKETS(f->bb) + b) * f->bsize;
1906 if (test_bit(first_rule, pos) && x0 == -1)
1907 x0 = b;
1908 if (test_bit(first_rule + rule_count - 1, pos))
1909 x1 = b;
1910 }
1911
1912 *l |= x0 << (BITS_PER_BYTE - f->bb - bit_offset);
1913 *r |= x1 << (BITS_PER_BYTE - f->bb - bit_offset);
1914
1915 bit_offset += f->bb;
1916 if (bit_offset >= BITS_PER_BYTE) {
1917 bit_offset %= BITS_PER_BYTE;
1918 l++;
1919 r++;
1920 }
1921
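		/* The distance between leftmost and rightmost matching
		 * buckets tells how many trailing bits of this group are
		 * free: a distance of 2^k - 1 means k free bits, hence
		 * 4 - k mask bits with the 4-bit groups assumed here. A fully
		 * wildcarded group (distance 15) adds no mask bits.
		 */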
1922 if (x1 - x0 == 0)
1923 mask_len += 4;
1924 else if (x1 - x0 == 1)
1925 mask_len += 3;
1926 else if (x1 - x0 == 3)
1927 mask_len += 2;
1928 else if (x1 - x0 == 7)
1929 mask_len += 1;
1930 }
1931
1932 return mask_len;
1933 }
1934
1935 /**
1936 * pipapo_match_field() - Match rules against byte ranges
1937 * @f: Field including the lookup table
 * @first_rule: First of the associated rules originating from the same entry
 * @rule_count: Number of associated rules
1940 * @start: Start of range to be matched
1941 * @end: End of range to be matched
1942 *
1943 * Return: true on match, false otherwise.
1944 */
1945 static bool pipapo_match_field(struct nft_pipapo_field *f,
1946 int first_rule, int rule_count,
1947 const u8 *start, const u8 *end)
1948 {
1949 u8 right[NFT_PIPAPO_MAX_BYTES] = { 0 };
1950 u8 left[NFT_PIPAPO_MAX_BYTES] = { 0 };
1951
1952 pipapo_get_boundaries(f, first_rule, rule_count, left, right);
1953
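	/* The rules match the entry only if the boundaries rebuilt from them
	 * are exactly the start and end of the range to be matched.
	 */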
1954 return !memcmp(start, left,
1955 f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f)) &&
1956 !memcmp(end, right, f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f));
1957 }
1958
1959 /**
1960 * nft_pipapo_remove() - Remove element given key, commit
1961 * @net: Network namespace
1962 * @set: nftables API set representation
1963 * @elem: nftables API element representation containing key data
1964 *
 * Similarly to nft_pipapo_activate(), this is used as a commit operation by
 * the API, but it's called once per element in the pending transaction, so we
 * can't implement it as a single commit operation. The closest we can get is
 * to remove the matched element here, if any, and commit the updated matching
 * data.
1969 */
1970 static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
1971 const struct nft_set_elem *elem)
1972 {
1973 struct nft_pipapo *priv = nft_set_priv(set);
1974 struct nft_pipapo_match *m = priv->clone;
1975 struct nft_pipapo_elem *e = elem->priv;
1976 int rules_f0, first_rule = 0;
1977 const u8 *data;
1978
1979 data = (const u8 *)nft_set_ext_key(&e->ext);
1980
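	/* Scan the first field for groups of rules that originated from the
	 * same entry: each group is a candidate, checked field by field
	 * against the key of the element to be removed.
	 */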
1981 while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
1982 union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
1983 const u8 *match_start, *match_end;
1984 struct nft_pipapo_field *f;
1985 int i, start, rules_fx;
1986
1987 match_start = data;
1988
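		/* Use the end key if the element carries one, otherwise match
		 * start and end against the same (single-point) key.
		 */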
1989 if (nft_set_ext_exists(&e->ext, NFT_SET_EXT_KEY_END))
1990 match_end = (const u8 *)nft_set_ext_key_end(&e->ext)->data;
1991 else
1992 match_end = data;
1993
1994 start = first_rule;
1995 rules_fx = rules_f0;
1996
1997 nft_pipapo_for_each_field(f, i, m) {
1998 bool last = i == m->field_count - 1;
1999
2000 if (!pipapo_match_field(f, start, rules_fx,
2001 match_start, match_end))
2002 break;
2003
2004 rulemap[i].to = start;
2005 rulemap[i].n = rules_fx;
2006
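			/* Follow the map to the rules of the next field */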
2007 rules_fx = f->mt[start].n;
2008 start = f->mt[start].to;
2009
2010 match_start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
2011 match_end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
2012
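			/* Only drop the rules if the mapping for the last
			 * field points to the element being removed, and flag
			 * the working copy as changed.
			 */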
2013 if (last && f->mt[rulemap[i].to].e == e) {
2014 priv->dirty = true;
2015 pipapo_drop(m, rulemap);
2016 return;
2017 }
2018 }
2019
2020 first_rule += rules_f0;
2021 }
2022
2023 WARN_ON_ONCE(1); /* elem_priv not found */
2024 }
2025
2026 /**
2027 * nft_pipapo_walk() - Walk over elements
2028 * @ctx: nftables API context
2029 * @set: nftables API set representation
2030 * @iter: Iterator
2031 *
2032 * As elements are referenced in the mapping array for the last field, directly
2033 * scan that array: there's no need to follow rule mappings from the first
2034 * field.
2035 */
2036 static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
2037 struct nft_set_iter *iter)
2038 {
2039 struct nft_pipapo *priv = nft_set_priv(set);
2040 const struct nft_pipapo_match *m;
2041 const struct nft_pipapo_field *f;
2042 int i, r;
2043
2044 WARN_ON_ONCE(iter->type != NFT_ITER_READ &&
2045 iter->type != NFT_ITER_UPDATE);
2046
2047 rcu_read_lock();
2048 if (iter->type == NFT_ITER_READ)
2049 m = rcu_dereference(priv->match);
2050 else
2051 m = priv->clone;
2052
2053 if (unlikely(!m))
2054 goto out;
2055
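	/* Skip to the last field: its mapping table points to set elements */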
2056 for (i = 0, f = m->f; i < m->field_count - 1; i++, f++)
2057 ;
2058
2059 for (r = 0; r < f->rules; r++) {
2060 struct nft_pipapo_elem *e;
2061 struct nft_set_elem elem;
2062
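		/* Several rules can map to the same element: report it only
		 * once, at its last rule.
		 */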
2063 if (r < f->rules - 1 && f->mt[r + 1].e == f->mt[r].e)
2064 continue;
2065
2066 if (iter->count < iter->skip)
2067 goto cont;
2068
2069 e = f->mt[r].e;
2070
2071 elem.priv = e;
2072
2073 iter->err = iter->fn(ctx, set, iter, &elem);
2074 if (iter->err < 0)
2075 goto out;
2076
2077 cont:
2078 iter->count++;
2079 }
2080
2081 out:
2082 rcu_read_unlock();
2083 }
2084
2085 /**
2086 * nft_pipapo_privsize() - Return the size of private data for the set
2087 * @nla: netlink attributes, ignored as size doesn't depend on them
2088 * @desc: Set description, ignored as size doesn't depend on it
2089 *
2090 * Return: size of private data for this set implementation, in bytes
2091 */
2092 static u64 nft_pipapo_privsize(const struct nlattr * const nla[],
2093 const struct nft_set_desc *desc)
2094 {
2095 return sizeof(struct nft_pipapo);
2096 }
2097
2098 /**
2099 * nft_pipapo_estimate() - Set size, space and lookup complexity
2100 * @desc: Set description, element count and field description used
 * @features: Flags: NFT_SET_INTERVAL must be set
2102 * @est: Storage for estimation data
2103 *
2104 * Return: true if set description is compatible, false otherwise
2105 */
2106 static bool nft_pipapo_estimate(const struct nft_set_desc *desc, u32 features,
2107 struct nft_set_estimate *est)
2108 {
2109 if (!(features & NFT_SET_INTERVAL) ||
2110 desc->field_count < NFT_PIPAPO_MIN_FIELDS)
2111 return false;
2112
2113 est->size = pipapo_estimate_size(desc);
2114 if (!est->size)
2115 return false;
2116
2117 est->lookup = NFT_SET_CLASS_O_LOG_N;
2118
2119 est->space = NFT_SET_CLASS_O_N;
2120
2121 return true;
2122 }
2123
2124 /**
2125 * nft_pipapo_init() - Initialise data for a set instance
2126 * @set: nftables API set representation
2127 * @desc: Set description
2128 * @nla: netlink attributes
2129 *
2130 * Validate number and size of fields passed as NFTA_SET_DESC_CONCAT netlink
2131 * attributes, initialise internal set parameters, current instance of matching
2132 * data and a copy for subsequent insertions.
2133 *
2134 * Return: 0 on success, negative error code on failure.
2135 */
2136 static int nft_pipapo_init(const struct nft_set *set,
2137 const struct nft_set_desc *desc,
2138 const struct nlattr * const nla[])
2139 {
2140 struct nft_pipapo *priv = nft_set_priv(set);
2141 struct nft_pipapo_match *m;
2142 struct nft_pipapo_field *f;
2143 int err, i, field_count;
2144
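	/* A description without explicit fields means a single field spanning
	 * the whole set key.
	 */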
2145 field_count = desc->field_count ? : 1;
2146
2147 if (field_count > NFT_PIPAPO_MAX_FIELDS)
2148 return -EINVAL;
2149
2150 m = kmalloc(struct_size(m, f, field_count), GFP_KERNEL);
2151 if (!m)
2152 return -ENOMEM;
2153
2154 m->field_count = field_count;
2155 m->bsize_max = 0;
2156
2157 m->scratch = alloc_percpu(struct nft_pipapo_scratch *);
2158 if (!m->scratch) {
2159 err = -ENOMEM;
2160 goto out_scratch;
2161 }
2162 for_each_possible_cpu(i)
2163 *per_cpu_ptr(m->scratch, i) = NULL;
2164
2165 rcu_head_init(&m->rcu);
2166
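	/* Start each field with the initial number of bits per group and no
	 * rules: lookup table memory is allocated as rules are inserted.
	 */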
2167 nft_pipapo_for_each_field(f, i, m) {
2168 int len = desc->field_len[i] ? : set->klen;
2169
2170 f->bb = NFT_PIPAPO_GROUP_BITS_INIT;
2171 f->groups = len * NFT_PIPAPO_GROUPS_PER_BYTE(f);
2172
2173 priv->width += round_up(len, sizeof(u32));
2174
2175 f->bsize = 0;
2176 f->rules = 0;
2177 NFT_PIPAPO_LT_ASSIGN(f, NULL);
2178 f->mt = NULL;
2179 }
2180
2181 /* Create an initial clone of matching data for next insertion */
2182 priv->clone = pipapo_clone(m);
2183 if (IS_ERR(priv->clone)) {
2184 err = PTR_ERR(priv->clone);
2185 goto out_free;
2186 }
2187
2188 priv->dirty = false;
2189
2190 rcu_assign_pointer(priv->match, m);
2191
2192 return 0;
2193
2194 out_free:
2195 free_percpu(m->scratch);
2196 out_scratch:
2197 kfree(m);
2198
2199 return err;
2200 }
2201
2202 /**
2203 * nft_set_pipapo_match_destroy() - Destroy elements from key mapping array
2204 * @ctx: context
2205 * @set: nftables API set representation
2206 * @m: matching data pointing to key mapping array
2207 */
2208 static void nft_set_pipapo_match_destroy(const struct nft_ctx *ctx,
2209 const struct nft_set *set,
2210 struct nft_pipapo_match *m)
2211 {
2212 struct nft_pipapo_field *f;
2213 int i, r;
2214
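	/* Elements are only referenced from the mapping table of the last
	 * field: skip to it.
	 */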
2215 for (i = 0, f = m->f; i < m->field_count - 1; i++, f++)
2216 ;
2217
2218 for (r = 0; r < f->rules; r++) {
2219 struct nft_pipapo_elem *e;
2220
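		/* Destroy each element once, even if several rules of the
		 * last field map to it.
		 */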
2221 if (r < f->rules - 1 && f->mt[r + 1].e == f->mt[r].e)
2222 continue;
2223
2224 e = f->mt[r].e;
2225
2226 nf_tables_set_elem_destroy(ctx, set, e);
2227 }
2228 }
2229
2230 /**
2231 * nft_pipapo_destroy() - Free private data for set and all committed elements
2232 * @ctx: context
2233 * @set: nftables API set representation
2234 */
2235 static void nft_pipapo_destroy(const struct nft_ctx *ctx,
2236 const struct nft_set *set)
2237 {
2238 struct nft_pipapo *priv = nft_set_priv(set);
2239 struct nft_pipapo_match *m;
2240 int cpu;
2241
2242 m = rcu_dereference_protected(priv->match, true);
2243 if (m) {
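		/* Wait for pending RCU callbacks, such as reclaim of previous
		 * matching data, to complete before freeing this copy.
		 */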
2244 rcu_barrier();
2245
2246 for_each_possible_cpu(cpu)
2247 pipapo_free_scratch(m, cpu);
2248 free_percpu(m->scratch);
2249 pipapo_free_fields(m);
2250 kfree(m);
2251 priv->match = NULL;
2252 }
2253
2254 if (priv->clone) {
2255 m = priv->clone;
2256
2257 nft_set_pipapo_match_destroy(ctx, set, m);
2258
2259 for_each_possible_cpu(cpu)
2260 pipapo_free_scratch(priv->clone, cpu);
2261 free_percpu(priv->clone->scratch);
2262
2263 pipapo_free_fields(priv->clone);
2264 kfree(priv->clone);
2265 priv->clone = NULL;
2266 }
2267 }
2268
2269 /**
2270 * nft_pipapo_gc_init() - Initialise garbage collection
2271 * @set: nftables API set representation
2272 *
 * Instead of setting up periodic work for garbage collection, which would
 * require swapping the matching data with the working copy, we perform garbage
 * collection opportunistically together with other commit operations once the
 * interval has elapsed, so all we need to do here is record the current
 * jiffies timestamp.
2277 */
2278 static void nft_pipapo_gc_init(const struct nft_set *set)
2279 {
2280 struct nft_pipapo *priv = nft_set_priv(set);
2281
2282 priv->last_gc = jiffies;
2283 }
2284
2285 const struct nft_set_type nft_set_pipapo_type = {
2286 .features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT |
2287 NFT_SET_TIMEOUT,
2288 .ops = {
2289 .lookup = nft_pipapo_lookup,
2290 .insert = nft_pipapo_insert,
2291 .activate = nft_pipapo_activate,
2292 .deactivate = nft_pipapo_deactivate,
2293 .flush = nft_pipapo_flush,
2294 .remove = nft_pipapo_remove,
2295 .walk = nft_pipapo_walk,
2296 .get = nft_pipapo_get,
2297 .privsize = nft_pipapo_privsize,
2298 .estimate = nft_pipapo_estimate,
2299 .init = nft_pipapo_init,
2300 .destroy = nft_pipapo_destroy,
2301 .gc_init = nft_pipapo_gc_init,
2302 .commit = nft_pipapo_commit,
2303 .abort = nft_pipapo_abort,
2304 .elemsize = offsetof(struct nft_pipapo_elem, ext),
2305 },
2306 };
2307
2308 #if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
2309 const struct nft_set_type nft_set_pipapo_avx2_type = {
2310 .features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT |
2311 NFT_SET_TIMEOUT,
2312 .ops = {
2313 .lookup = nft_pipapo_avx2_lookup,
2314 .insert = nft_pipapo_insert,
2315 .activate = nft_pipapo_activate,
2316 .deactivate = nft_pipapo_deactivate,
2317 .flush = nft_pipapo_flush,
2318 .remove = nft_pipapo_remove,
2319 .walk = nft_pipapo_walk,
2320 .get = nft_pipapo_get,
2321 .privsize = nft_pipapo_privsize,
2322 .estimate = nft_pipapo_avx2_estimate,
2323 .init = nft_pipapo_init,
2324 .destroy = nft_pipapo_destroy,
2325 .gc_init = nft_pipapo_gc_init,
2326 .commit = nft_pipapo_commit,
2327 .abort = nft_pipapo_abort,
2328 .elemsize = offsetof(struct nft_pipapo_elem, ext),
2329 },
2330 };
2331 #endif
2332