1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * IPv6 IOAM implementation
4 *
5 * Author:
6 * Justin Iurman <justin.iurman@uliege.be>
7 */
8
9 #include <linux/errno.h>
10 #include <linux/types.h>
11 #include <linux/kernel.h>
12 #include <linux/net.h>
13 #include <linux/ioam6.h>
14 #include <linux/ioam6_genl.h>
15 #include <linux/rhashtable.h>
16 #include <linux/netdevice.h>
17
18 #include <net/addrconf.h>
19 #include <net/genetlink.h>
20 #include <net/ioam6.h>
21 #include <net/sch_generic.h>
22
/* Free a namespace object after the current RCU grace period, so that
 * lockless readers still holding a reference obtained under
 * rcu_read_lock() remain safe.
 */
static void ioam6_ns_release(struct ioam6_namespace *ns)
{
        kfree_rcu(ns, rcu);
}
27
/* Free a schema object after the current RCU grace period (see
 * ioam6_ns_release() for rationale).
 */
static void ioam6_sc_release(struct ioam6_schema *sc)
{
        kfree_rcu(sc, rcu);
}
32
/* rhashtable_free_and_destroy() callback: release one namespace entry.
 * @arg is the opaque callback argument and is unused here.
 */
static void ioam6_free_ns(void *ptr, void *arg)
{
        if (ptr)
                ioam6_ns_release((struct ioam6_namespace *)ptr);
}
40
/* rhashtable_free_and_destroy() callback: release one schema entry.
 * @arg is the opaque callback argument and is unused here.
 */
static void ioam6_free_sc(void *ptr, void *arg)
{
        if (ptr)
                ioam6_sc_release((struct ioam6_schema *)ptr);
}
48
ioam6_ns_cmpfn(struct rhashtable_compare_arg * arg,const void * obj)49 static int ioam6_ns_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
50 {
51 const struct ioam6_namespace *ns = obj;
52
53 return (ns->id != *(__be16 *)arg->key);
54 }
55
ioam6_sc_cmpfn(struct rhashtable_compare_arg * arg,const void * obj)56 static int ioam6_sc_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
57 {
58 const struct ioam6_schema *sc = obj;
59
60 return (sc->id != *(u32 *)arg->key);
61 }
62
/* Hashtable parameters for IOAM namespaces: keyed by the __be16 id stored
 * inside the object itself; custom compare callback avoids hashing issues
 * with the 2-byte key.
 */
static const struct rhashtable_params rht_ns_params = {
        .key_len = sizeof(__be16),
        .key_offset = offsetof(struct ioam6_namespace, id),
        .head_offset = offsetof(struct ioam6_namespace, head),
        .automatic_shrinking = true,
        .obj_cmpfn = ioam6_ns_cmpfn,
};
70
/* Hashtable parameters for IOAM schemas: keyed by the host-order u32 id
 * stored inside the object itself.
 */
static const struct rhashtable_params rht_sc_params = {
        .key_len = sizeof(u32),
        .key_offset = offsetof(struct ioam6_schema, id),
        .head_offset = offsetof(struct ioam6_schema, head),
        .automatic_shrinking = true,
        .obj_cmpfn = ioam6_sc_cmpfn,
};
78
/* Defined at the bottom of this file; forward declaration needed by the
 * dump helpers, which pass it to genlmsg_put().
 */
static struct genl_family ioam6_genl_family;

/* Per-command netlink attribute policies. */

static const struct nla_policy ioam6_genl_policy_addns[] = {
        [IOAM6_ATTR_NS_ID] = { .type = NLA_U16 },
        [IOAM6_ATTR_NS_DATA] = { .type = NLA_U32 },
        [IOAM6_ATTR_NS_DATA_WIDE] = { .type = NLA_U64 },
};

static const struct nla_policy ioam6_genl_policy_delns[] = {
        [IOAM6_ATTR_NS_ID] = { .type = NLA_U16 },
};

static const struct nla_policy ioam6_genl_policy_addsc[] = {
        [IOAM6_ATTR_SC_ID] = { .type = NLA_U32 },
        [IOAM6_ATTR_SC_DATA] = { .type = NLA_BINARY,
                                 .len = IOAM6_MAX_SCHEMA_DATA_LEN },
};

static const struct nla_policy ioam6_genl_policy_delsc[] = {
        [IOAM6_ATTR_SC_ID] = { .type = NLA_U32 },
};

/* Used by NS_SET_SCHEMA: either a schema id, or the SC_NONE flag to
 * detach any currently attached schema.
 */
static const struct nla_policy ioam6_genl_policy_ns_sc[] = {
        [IOAM6_ATTR_NS_ID] = { .type = NLA_U16 },
        [IOAM6_ATTR_SC_ID] = { .type = NLA_U32 },
        [IOAM6_ATTR_SC_NONE] = { .type = NLA_FLAG },
};
106
/* IOAM6_CMD_ADD_NAMESPACE: register a new IOAM namespace in this netns.
 *
 * IOAM6_ATTR_NS_ID is mandatory; IOAM6_ATTR_NS_DATA and
 * IOAM6_ATTR_NS_DATA_WIDE are optional and default to the
 * IOAM6_U32_UNAVAILABLE / IOAM6_U64_UNAVAILABLE sentinels.
 *
 * Returns 0 on success, -EINVAL on missing id, -EEXIST if the id is
 * already registered, -ENOMEM on allocation failure, or the error from
 * rhashtable insertion.
 */
static int ioam6_genl_addns(struct sk_buff *skb, struct genl_info *info)
{
        struct ioam6_pernet_data *nsdata;
        struct ioam6_namespace *ns;
        u64 data64;
        u32 data32;
        __be16 id;
        int err;

        if (!info->attrs[IOAM6_ATTR_NS_ID])
                return -EINVAL;

        /* ids are stored big-endian (__be16) */
        id = cpu_to_be16(nla_get_u16(info->attrs[IOAM6_ATTR_NS_ID]));
        nsdata = ioam6_pernet(genl_info_net(info));

        /* serialize against concurrent add/del/set operations */
        mutex_lock(&nsdata->lock);

        ns = rhashtable_lookup_fast(&nsdata->namespaces, &id, rht_ns_params);
        if (ns) {
                err = -EEXIST;
                goto out_unlock;
        }

        ns = kzalloc(sizeof(*ns), GFP_KERNEL);
        if (!ns) {
                err = -ENOMEM;
                goto out_unlock;
        }

        ns->id = id;

        if (!info->attrs[IOAM6_ATTR_NS_DATA])
                data32 = IOAM6_U32_UNAVAILABLE;
        else
                data32 = nla_get_u32(info->attrs[IOAM6_ATTR_NS_DATA]);

        if (!info->attrs[IOAM6_ATTR_NS_DATA_WIDE])
                data64 = IOAM6_U64_UNAVAILABLE;
        else
                data64 = nla_get_u64(info->attrs[IOAM6_ATTR_NS_DATA_WIDE]);

        ns->data = cpu_to_be32(data32);
        ns->data_wide = cpu_to_be64(data64);

        err = rhashtable_lookup_insert_fast(&nsdata->namespaces, &ns->head,
                                            rht_ns_params);
        if (err)
                kfree(ns); /* never published, plain kfree is fine */

out_unlock:
        mutex_unlock(&nsdata->lock);
        return err;
}
160
/* IOAM6_CMD_DEL_NAMESPACE: remove a namespace and break its link with an
 * attached schema, if any.
 *
 * Returns 0 on success, -EINVAL on missing id, -ENOENT if the id is
 * unknown, or the error from rhashtable removal.
 */
static int ioam6_genl_delns(struct sk_buff *skb, struct genl_info *info)
{
        struct ioam6_pernet_data *nsdata;
        struct ioam6_namespace *ns;
        struct ioam6_schema *sc;
        __be16 id;
        int err;

        if (!info->attrs[IOAM6_ATTR_NS_ID])
                return -EINVAL;

        id = cpu_to_be16(nla_get_u16(info->attrs[IOAM6_ATTR_NS_ID]));
        nsdata = ioam6_pernet(genl_info_net(info));

        mutex_lock(&nsdata->lock);

        ns = rhashtable_lookup_fast(&nsdata->namespaces, &id, rht_ns_params);
        if (!ns) {
                err = -ENOENT;
                goto out_unlock;
        }

        /* ns->schema is RCU-protected; the mutex makes this access safe */
        sc = rcu_dereference_protected(ns->schema,
                                       lockdep_is_held(&nsdata->lock));

        err = rhashtable_remove_fast(&nsdata->namespaces, &ns->head,
                                     rht_ns_params);
        if (err)
                goto out_unlock;

        /* clear the schema's back-pointer before freeing the namespace */
        if (sc)
                rcu_assign_pointer(sc->ns, NULL);

        /* deferred free: readers may still hold a reference under RCU */
        ioam6_ns_release(ns);

out_unlock:
        mutex_unlock(&nsdata->lock);
        return err;
}
200
/* Emit one namespace as a netlink message into @skb.
 *
 * The data/data_wide attributes are only included when they differ from
 * the "unavailable" sentinels; the attached schema id is included when a
 * schema is currently linked (read under RCU).
 *
 * Returns 0 on success, -ENOMEM if the genl header cannot be placed, or
 * -EMSGSIZE when the message does not fit (the partial message is
 * cancelled).
 */
static int __ioam6_genl_dumpns_element(struct ioam6_namespace *ns,
                                       u32 portid,
                                       u32 seq,
                                       u32 flags,
                                       struct sk_buff *skb,
                                       u8 cmd)
{
        struct ioam6_schema *sc;
        u64 data64;
        u32 data32;
        void *hdr;

        hdr = genlmsg_put(skb, portid, seq, &ioam6_genl_family, flags, cmd);
        if (!hdr)
                return -ENOMEM;

        /* stored big-endian; userspace expects host order on this API */
        data32 = be32_to_cpu(ns->data);
        data64 = be64_to_cpu(ns->data_wide);

        if (nla_put_u16(skb, IOAM6_ATTR_NS_ID, be16_to_cpu(ns->id)) ||
            (data32 != IOAM6_U32_UNAVAILABLE &&
             nla_put_u32(skb, IOAM6_ATTR_NS_DATA, data32)) ||
            (data64 != IOAM6_U64_UNAVAILABLE &&
             nla_put_u64_64bit(skb, IOAM6_ATTR_NS_DATA_WIDE,
                               data64, IOAM6_ATTR_PAD)))
                goto nla_put_failure;

        rcu_read_lock();

        sc = rcu_dereference(ns->schema);
        if (sc && nla_put_u32(skb, IOAM6_ATTR_SC_ID, sc->id)) {
                rcu_read_unlock();
                goto nla_put_failure;
        }

        rcu_read_unlock();

        genlmsg_end(skb, hdr);
        return 0;

nla_put_failure:
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
}
245
/* Dump .start callback: allocate a rhashtable iterator, stash it in
 * cb->args[0] so the dumpit/done callbacks can find it, and enter the
 * walk on the namespaces table.
 */
static int ioam6_genl_dumpns_start(struct netlink_callback *cb)
{
        struct ioam6_pernet_data *nsdata = ioam6_pernet(sock_net(cb->skb->sk));
        struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

        if (!iter) {
                iter = kmalloc(sizeof(*iter), GFP_KERNEL);
                if (!iter)
                        return -ENOMEM;

                cb->args[0] = (long)iter;
        }

        rhashtable_walk_enter(&nsdata->namespaces, iter);

        return 0;
}
263
/* Dump .done callback: exit the table walk and free the iterator that
 * was allocated in ioam6_genl_dumpns_start().
 */
static int ioam6_genl_dumpns_done(struct netlink_callback *cb)
{
        struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

        rhashtable_walk_exit(iter);
        kfree(iter);

        return 0;
}
273
/* IOAM6_CMD_DUMP_NAMESPACES dumpit callback: walk the namespaces table
 * and emit one message per entry.
 *
 * -EAGAIN from rhashtable_walk_next() signals a concurrent resize and is
 * simply retried.  On success returns skb->len, per netlink dump
 * convention, so the core knows more data may follow.
 */
static int ioam6_genl_dumpns(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct rhashtable_iter *iter;
        struct ioam6_namespace *ns;
        int err;

        iter = (struct rhashtable_iter *)cb->args[0];
        rhashtable_walk_start(iter);

        for (;;) {
                ns = rhashtable_walk_next(iter);

                if (IS_ERR(ns)) {
                        if (PTR_ERR(ns) == -EAGAIN)
                                continue;
                        err = PTR_ERR(ns);
                        goto done;
                } else if (!ns) {
                        /* end of table */
                        break;
                }

                err = __ioam6_genl_dumpns_element(ns,
                                                  NETLINK_CB(cb->skb).portid,
                                                  cb->nlh->nlmsg_seq,
                                                  NLM_F_MULTI,
                                                  skb,
                                                  IOAM6_CMD_DUMP_NAMESPACES);
                if (err)
                        goto done;
        }

        err = skb->len;

done:
        rhashtable_walk_stop(iter);
        return err;
}
311
ioam6_genl_addsc(struct sk_buff * skb,struct genl_info * info)312 static int ioam6_genl_addsc(struct sk_buff *skb, struct genl_info *info)
313 {
314 struct ioam6_pernet_data *nsdata;
315 int len, len_aligned, err;
316 struct ioam6_schema *sc;
317 u32 id;
318
319 if (!info->attrs[IOAM6_ATTR_SC_ID] || !info->attrs[IOAM6_ATTR_SC_DATA])
320 return -EINVAL;
321
322 id = nla_get_u32(info->attrs[IOAM6_ATTR_SC_ID]);
323 nsdata = ioam6_pernet(genl_info_net(info));
324
325 mutex_lock(&nsdata->lock);
326
327 sc = rhashtable_lookup_fast(&nsdata->schemas, &id, rht_sc_params);
328 if (sc) {
329 err = -EEXIST;
330 goto out_unlock;
331 }
332
333 len = nla_len(info->attrs[IOAM6_ATTR_SC_DATA]);
334 len_aligned = ALIGN(len, 4);
335
336 sc = kzalloc(sizeof(*sc) + len_aligned, GFP_KERNEL);
337 if (!sc) {
338 err = -ENOMEM;
339 goto out_unlock;
340 }
341
342 sc->id = id;
343 sc->len = len_aligned;
344 sc->hdr = cpu_to_be32(sc->id | ((u8)(sc->len / 4) << 24));
345 nla_memcpy(sc->data, info->attrs[IOAM6_ATTR_SC_DATA], len);
346
347 err = rhashtable_lookup_insert_fast(&nsdata->schemas, &sc->head,
348 rht_sc_params);
349 if (err)
350 goto free_sc;
351
352 out_unlock:
353 mutex_unlock(&nsdata->lock);
354 return err;
355 free_sc:
356 kfree(sc);
357 goto out_unlock;
358 }
359
/* IOAM6_CMD_DEL_SCHEMA: remove a schema and break its link with an
 * attached namespace, if any.  Mirror of ioam6_genl_delns().
 *
 * Returns 0 on success, -EINVAL on missing id, -ENOENT if the id is
 * unknown, or the error from rhashtable removal.
 */
static int ioam6_genl_delsc(struct sk_buff *skb, struct genl_info *info)
{
        struct ioam6_pernet_data *nsdata;
        struct ioam6_namespace *ns;
        struct ioam6_schema *sc;
        int err;
        u32 id;

        if (!info->attrs[IOAM6_ATTR_SC_ID])
                return -EINVAL;

        id = nla_get_u32(info->attrs[IOAM6_ATTR_SC_ID]);
        nsdata = ioam6_pernet(genl_info_net(info));

        mutex_lock(&nsdata->lock);

        sc = rhashtable_lookup_fast(&nsdata->schemas, &id, rht_sc_params);
        if (!sc) {
                err = -ENOENT;
                goto out_unlock;
        }

        /* sc->ns is RCU-protected; the mutex makes this access safe */
        ns = rcu_dereference_protected(sc->ns, lockdep_is_held(&nsdata->lock));

        err = rhashtable_remove_fast(&nsdata->schemas, &sc->head,
                                     rht_sc_params);
        if (err)
                goto out_unlock;

        /* clear the namespace's forward-pointer before freeing the schema */
        if (ns)
                rcu_assign_pointer(ns->schema, NULL);

        /* deferred free: readers may still hold a reference under RCU */
        ioam6_sc_release(sc);

out_unlock:
        mutex_unlock(&nsdata->lock);
        return err;
}
398
/* Emit one schema as a netlink message into @skb, including the id of
 * the namespace it is attached to, if any (read under RCU).
 *
 * Returns 0 on success, -ENOMEM if the genl header cannot be placed, or
 * -EMSGSIZE when the message does not fit (the partial message is
 * cancelled).
 */
static int __ioam6_genl_dumpsc_element(struct ioam6_schema *sc,
                                       u32 portid, u32 seq, u32 flags,
                                       struct sk_buff *skb, u8 cmd)
{
        struct ioam6_namespace *ns;
        void *hdr;

        hdr = genlmsg_put(skb, portid, seq, &ioam6_genl_family, flags, cmd);
        if (!hdr)
                return -ENOMEM;

        if (nla_put_u32(skb, IOAM6_ATTR_SC_ID, sc->id) ||
            nla_put(skb, IOAM6_ATTR_SC_DATA, sc->len, sc->data))
                goto nla_put_failure;

        rcu_read_lock();

        ns = rcu_dereference(sc->ns);
        if (ns && nla_put_u16(skb, IOAM6_ATTR_NS_ID, be16_to_cpu(ns->id))) {
                rcu_read_unlock();
                goto nla_put_failure;
        }

        rcu_read_unlock();

        genlmsg_end(skb, hdr);
        return 0;

nla_put_failure:
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
}
431
/* Dump .start callback for schemas: allocate a rhashtable iterator,
 * stash it in cb->args[0], and enter the walk on the schemas table.
 * Mirror of ioam6_genl_dumpns_start().
 */
static int ioam6_genl_dumpsc_start(struct netlink_callback *cb)
{
        struct ioam6_pernet_data *nsdata = ioam6_pernet(sock_net(cb->skb->sk));
        struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

        if (!iter) {
                iter = kmalloc(sizeof(*iter), GFP_KERNEL);
                if (!iter)
                        return -ENOMEM;

                cb->args[0] = (long)iter;
        }

        rhashtable_walk_enter(&nsdata->schemas, iter);

        return 0;
}
449
/* Dump .done callback: exit the table walk and free the iterator that
 * was allocated in ioam6_genl_dumpsc_start().
 */
static int ioam6_genl_dumpsc_done(struct netlink_callback *cb)
{
        struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

        rhashtable_walk_exit(iter);
        kfree(iter);

        return 0;
}
459
/* IOAM6_CMD_DUMP_SCHEMAS dumpit callback: walk the schemas table and
 * emit one message per entry.  Mirror of ioam6_genl_dumpns();
 * -EAGAIN (concurrent resize) is retried, skb->len is returned on
 * success per netlink dump convention.
 */
static int ioam6_genl_dumpsc(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct rhashtable_iter *iter;
        struct ioam6_schema *sc;
        int err;

        iter = (struct rhashtable_iter *)cb->args[0];
        rhashtable_walk_start(iter);

        for (;;) {
                sc = rhashtable_walk_next(iter);

                if (IS_ERR(sc)) {
                        if (PTR_ERR(sc) == -EAGAIN)
                                continue;
                        err = PTR_ERR(sc);
                        goto done;
                } else if (!sc) {
                        /* end of table */
                        break;
                }

                err = __ioam6_genl_dumpsc_element(sc,
                                                  NETLINK_CB(cb->skb).portid,
                                                  cb->nlh->nlmsg_seq,
                                                  NLM_F_MULTI,
                                                  skb,
                                                  IOAM6_CMD_DUMP_SCHEMAS);
                if (err)
                        goto done;
        }

        err = skb->len;

done:
        rhashtable_walk_stop(iter);
        return err;
}
497
/* IOAM6_CMD_NS_SET_SCHEMA: attach a schema to a namespace, or detach the
 * current one when IOAM6_ATTR_SC_NONE is set.
 *
 * The link is bidirectional (ns->schema and sc->ns), so any previous
 * attachment on either side is cleared first to keep the pair
 * consistent.  All pointer updates happen under the pernet mutex and use
 * rcu_assign_pointer() for lockless readers.
 *
 * Returns 0 on success, -EINVAL on missing/conflicting attributes, or
 * -ENOENT when the namespace or schema id is unknown.
 */
static int ioam6_genl_ns_set_schema(struct sk_buff *skb, struct genl_info *info)
{
        struct ioam6_namespace *ns, *ns_ref;
        struct ioam6_schema *sc, *sc_ref;
        struct ioam6_pernet_data *nsdata;
        __be16 ns_id;
        u32 sc_id;
        int err;

        /* need a namespace id, and either a schema id or the "none" flag */
        if (!info->attrs[IOAM6_ATTR_NS_ID] ||
            (!info->attrs[IOAM6_ATTR_SC_ID] &&
             !info->attrs[IOAM6_ATTR_SC_NONE]))
                return -EINVAL;

        ns_id = cpu_to_be16(nla_get_u16(info->attrs[IOAM6_ATTR_NS_ID]));
        nsdata = ioam6_pernet(genl_info_net(info));

        mutex_lock(&nsdata->lock);

        ns = rhashtable_lookup_fast(&nsdata->namespaces, &ns_id, rht_ns_params);
        if (!ns) {
                err = -ENOENT;
                goto out_unlock;
        }

        if (info->attrs[IOAM6_ATTR_SC_NONE]) {
                /* detach request: new schema is NULL */
                sc = NULL;
        } else {
                sc_id = nla_get_u32(info->attrs[IOAM6_ATTR_SC_ID]);
                sc = rhashtable_lookup_fast(&nsdata->schemas, &sc_id,
                                            rht_sc_params);
                if (!sc) {
                        err = -ENOENT;
                        goto out_unlock;
                }
        }

        /* unlink the namespace's previous schema, if any */
        sc_ref = rcu_dereference_protected(ns->schema,
                                           lockdep_is_held(&nsdata->lock));
        if (sc_ref)
                rcu_assign_pointer(sc_ref->ns, NULL);
        rcu_assign_pointer(ns->schema, sc);

        if (sc) {
                /* unlink the schema's previous namespace, if any */
                ns_ref = rcu_dereference_protected(sc->ns,
                                                   lockdep_is_held(&nsdata->lock));
                if (ns_ref)
                        rcu_assign_pointer(ns_ref->schema, NULL);
                rcu_assign_pointer(sc->ns, ns);
        }

        err = 0;

out_unlock:
        mutex_unlock(&nsdata->lock);
        return err;
}
555
/* Generic netlink command table.  All commands require CAP_NET_ADMIN
 * (GENL_ADMIN_PERM).  NOTE(review): strict validation is disabled
 * (GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP) — presumably for
 * userspace compatibility; confirm before tightening.
 */
static const struct genl_ops ioam6_genl_ops[] = {
        {
                .cmd = IOAM6_CMD_ADD_NAMESPACE,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = ioam6_genl_addns,
                .flags = GENL_ADMIN_PERM,
                .policy = ioam6_genl_policy_addns,
                .maxattr = ARRAY_SIZE(ioam6_genl_policy_addns) - 1,
        },
        {
                .cmd = IOAM6_CMD_DEL_NAMESPACE,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = ioam6_genl_delns,
                .flags = GENL_ADMIN_PERM,
                .policy = ioam6_genl_policy_delns,
                .maxattr = ARRAY_SIZE(ioam6_genl_policy_delns) - 1,
        },
        {
                .cmd = IOAM6_CMD_DUMP_NAMESPACES,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .start = ioam6_genl_dumpns_start,
                .dumpit = ioam6_genl_dumpns,
                .done = ioam6_genl_dumpns_done,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = IOAM6_CMD_ADD_SCHEMA,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = ioam6_genl_addsc,
                .flags = GENL_ADMIN_PERM,
                .policy = ioam6_genl_policy_addsc,
                .maxattr = ARRAY_SIZE(ioam6_genl_policy_addsc) - 1,
        },
        {
                .cmd = IOAM6_CMD_DEL_SCHEMA,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = ioam6_genl_delsc,
                .flags = GENL_ADMIN_PERM,
                .policy = ioam6_genl_policy_delsc,
                .maxattr = ARRAY_SIZE(ioam6_genl_policy_delsc) - 1,
        },
        {
                .cmd = IOAM6_CMD_DUMP_SCHEMAS,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .start = ioam6_genl_dumpsc_start,
                .dumpit = ioam6_genl_dumpsc,
                .done = ioam6_genl_dumpsc_done,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = IOAM6_CMD_NS_SET_SCHEMA,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = ioam6_genl_ns_set_schema,
                .flags = GENL_ADMIN_PERM,
                .policy = ioam6_genl_policy_ns_sc,
                .maxattr = ARRAY_SIZE(ioam6_genl_policy_ns_sc) - 1,
        },
};
614
/* The IOAM6 generic netlink family: netns-aware, parallel-safe ops, and
 * every command id past NS_SET_SCHEMA reserved for future use.
 */
static struct genl_family ioam6_genl_family __ro_after_init = {
        .name = IOAM6_GENL_NAME,
        .version = IOAM6_GENL_VERSION,
        .netnsok = true,
        .parallel_ops = true,
        .ops = ioam6_genl_ops,
        .n_ops = ARRAY_SIZE(ioam6_genl_ops),
        .resv_start_op = IOAM6_CMD_NS_SET_SCHEMA + 1,
        .module = THIS_MODULE,
};
625
/* Look up the IOAM namespace registered under @id (big-endian, as found
 * in the IOAM option) for @net.  Returns NULL when no such namespace
 * exists.
 */
struct ioam6_namespace *ioam6_namespace(struct net *net, __be16 id)
{
        return rhashtable_lookup_fast(&ioam6_pernet(net)->namespaces,
                                      &id, rht_ns_params);
}
632
/* Write this node's trace fields into the pre-trace data block.
 *
 * The write position is computed so that the node's record (nodelen*4
 * bytes plus sclen*4 bytes of opaque schema data) lands at the *end* of
 * the remaining free area (remlen*4 bytes): records are filled from the
 * bottom of the option upward.  The caller (ioam6_fill_trace_data())
 * has already verified that enough space remains.
 *
 * Each trace->type bit selects one field, written big-endian in the
 * order mandated by the bit numbering.  Undefined bits (12-21) are
 * filled with the 32-bit "unavailable" sentinel.
 *
 * @is_input: true on the ingress path, where hop_limit has not been
 *            decremented yet, so 1 is subtracted to report the
 *            post-forwarding value.
 */
static void __ioam6_fill_trace_data(struct sk_buff *skb,
                                    struct ioam6_namespace *ns,
                                    struct ioam6_trace_hdr *trace,
                                    struct ioam6_schema *sc,
                                    u8 sclen, bool is_input)
{
        struct timespec64 ts;
        ktime_t tstamp;
        u64 raw64;
        u32 raw32;
        u16 raw16;
        u8 *data;
        u8 byte;

        /* start of this node's slot, at the end of the free area */
        data = trace->data + trace->remlen * 4 - trace->nodelen * 4 - sclen * 4;

        /* hop_lim and node_id */
        if (trace->type.bit0) {
                byte = ipv6_hdr(skb)->hop_limit;
                if (is_input)
                        byte--;

                raw32 = dev_net(skb_dst(skb)->dev)->ipv6.sysctl.ioam6_id;

                /* hop-limit in the top byte, 24-bit node id below it */
                *(__be32 *)data = cpu_to_be32((byte << 24) | raw32);
                data += sizeof(__be32);
        }

        /* ingress_if_id and egress_if_id */
        if (trace->type.bit1) {
                if (!skb->dev)
                        raw16 = IOAM6_U16_UNAVAILABLE;
                else
                        raw16 = (__force u16)__in6_dev_get(skb->dev)->cnf.ioam6_id;

                *(__be16 *)data = cpu_to_be16(raw16);
                data += sizeof(__be16);

                if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
                        raw16 = IOAM6_U16_UNAVAILABLE;
                else
                        raw16 = (__force u16)__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id;

                *(__be16 *)data = cpu_to_be16(raw16);
                data += sizeof(__be16);
        }

        /* timestamp seconds */
        if (trace->type.bit2) {
                if (!skb->dev) {
                        *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
                } else {
                        tstamp = skb_tstamp_cond(skb, true);
                        ts = ktime_to_timespec64(tstamp);

                        *(__be32 *)data = cpu_to_be32((u32)ts.tv_sec);
                }
                data += sizeof(__be32);
        }

        /* timestamp subseconds */
        if (trace->type.bit3) {
                if (!skb->dev) {
                        *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
                } else {
                        /* ts already computed above when bit2 is also set */
                        if (!trace->type.bit2) {
                                tstamp = skb_tstamp_cond(skb, true);
                                ts = ktime_to_timespec64(tstamp);
                        }

                        *(__be32 *)data = cpu_to_be32((u32)(ts.tv_nsec / NSEC_PER_USEC));
                }
                data += sizeof(__be32);
        }

        /* transit delay */
        if (trace->type.bit4) {
                *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
                data += sizeof(__be32);
        }

        /* namespace data */
        if (trace->type.bit5) {
                /* already stored big-endian, copied as-is */
                *(__be32 *)data = ns->data;
                data += sizeof(__be32);
        }

        /* queue depth */
        if (trace->type.bit6) {
                struct netdev_queue *queue;
                struct Qdisc *qdisc;
                __u32 qlen, backlog;

                if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
                        *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
                } else {
                        queue = skb_get_tx_queue(skb_dst(skb)->dev, skb);
                        qdisc = rcu_dereference(queue->qdisc);
                        qdisc_qstats_qlen_backlog(qdisc, &qlen, &backlog);

                        /* report the backlog (bytes), not the packet count */
                        *(__be32 *)data = cpu_to_be32(backlog);
                }
                data += sizeof(__be32);
        }

        /* checksum complement */
        if (trace->type.bit7) {
                *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
                data += sizeof(__be32);
        }

        /* hop_lim and node_id (wide) */
        if (trace->type.bit8) {
                byte = ipv6_hdr(skb)->hop_limit;
                if (is_input)
                        byte--;

                raw64 = dev_net(skb_dst(skb)->dev)->ipv6.sysctl.ioam6_id_wide;

                /* hop-limit in the top byte, 56-bit wide node id below it */
                *(__be64 *)data = cpu_to_be64(((u64)byte << 56) | raw64);
                data += sizeof(__be64);
        }

        /* ingress_if_id and egress_if_id (wide) */
        if (trace->type.bit9) {
                if (!skb->dev)
                        raw32 = IOAM6_U32_UNAVAILABLE;
                else
                        raw32 = __in6_dev_get(skb->dev)->cnf.ioam6_id_wide;

                *(__be32 *)data = cpu_to_be32(raw32);
                data += sizeof(__be32);

                if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
                        raw32 = IOAM6_U32_UNAVAILABLE;
                else
                        raw32 = __in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id_wide;

                *(__be32 *)data = cpu_to_be32(raw32);
                data += sizeof(__be32);
        }

        /* namespace data (wide) */
        if (trace->type.bit10) {
                /* already stored big-endian, copied as-is */
                *(__be64 *)data = ns->data_wide;
                data += sizeof(__be64);
        }

        /* buffer occupancy */
        if (trace->type.bit11) {
                *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
                data += sizeof(__be32);
        }

        /* bit12 undefined: filled with empty value */
        if (trace->type.bit12) {
                *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
                data += sizeof(__be32);
        }

        /* bit13 undefined: filled with empty value */
        if (trace->type.bit13) {
                *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
                data += sizeof(__be32);
        }

        /* bit14 undefined: filled with empty value */
        if (trace->type.bit14) {
                *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
                data += sizeof(__be32);
        }

        /* bit15 undefined: filled with empty value */
        if (trace->type.bit15) {
                *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
                data += sizeof(__be32);
        }

        /* bit16 undefined: filled with empty value */
        if (trace->type.bit16) {
                *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
                data += sizeof(__be32);
        }

        /* bit17 undefined: filled with empty value */
        if (trace->type.bit17) {
                *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
                data += sizeof(__be32);
        }

        /* bit18 undefined: filled with empty value */
        if (trace->type.bit18) {
                *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
                data += sizeof(__be32);
        }

        /* bit19 undefined: filled with empty value */
        if (trace->type.bit19) {
                *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
                data += sizeof(__be32);
        }

        /* bit20 undefined: filled with empty value */
        if (trace->type.bit20) {
                *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
                data += sizeof(__be32);
        }

        /* bit21 undefined: filled with empty value */
        if (trace->type.bit21) {
                *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
                data += sizeof(__be32);
        }

        /* opaque state snapshot: last field, so @data needs no further
         * advance after the no-schema case
         */
        if (trace->type.bit22) {
                if (!sc) {
                        /* no schema attached: header-only, 24-bit sentinel */
                        *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE >> 8);
                } else {
                        *(__be32 *)data = sc->hdr;
                        data += sizeof(__be32);

                        memcpy(data, sc->data, sc->len);
                }
        }
}
859
/* called with rcu_read_lock() */
/* Insert this node's trace record into @trace for namespace @ns.
 *
 * Checks the remaining space first: if the record (nodelen plus the
 * opaque-schema words when bit22 is set) does not fit, the Overflow flag
 * is raised and nothing is written.  Otherwise the record is filled by
 * __ioam6_fill_trace_data() and remlen is decremented accordingly.
 */
void ioam6_fill_trace_data(struct sk_buff *skb,
                           struct ioam6_namespace *ns,
                           struct ioam6_trace_hdr *trace,
                           bool is_input)
{
        struct ioam6_schema *sc;
        u8 sclen = 0;

        /* Skip if Overflow flag is set
         */
        if (trace->overflow)
                return;

        /* NodeLen does not include Opaque State Snapshot length. We need to
         * take it into account if the corresponding bit is set (bit 22) and
         * if the current IOAM namespace has an active schema attached to it
         */
        sc = rcu_dereference(ns->schema);
        if (trace->type.bit22) {
                /* one 4-byte word for the opaque header itself */
                sclen = sizeof_field(struct ioam6_schema, hdr) / 4;

                if (sc)
                        sclen += sc->len / 4;
        }

        /* If there is no space remaining, we set the Overflow flag and we
         * skip without filling the trace
         */
        if (!trace->remlen || trace->remlen < trace->nodelen + sclen) {
                trace->overflow = 1;
                return;
        }

        __ioam6_fill_trace_data(skb, ns, trace, sc, sclen, is_input);
        trace->remlen -= trace->nodelen + sclen;
}
897
/* Per-netns setup: allocate the pernet data and initialize both
 * hashtables.  On failure everything allocated so far is unwound and
 * net->ipv6.ioam6_data is left NULL.
 */
static int __net_init ioam6_net_init(struct net *net)
{
        struct ioam6_pernet_data *nsdata;
        int err = -ENOMEM;

        nsdata = kzalloc(sizeof(*nsdata), GFP_KERNEL);
        if (!nsdata)
                goto out;

        mutex_init(&nsdata->lock);
        net->ipv6.ioam6_data = nsdata;

        err = rhashtable_init(&nsdata->namespaces, &rht_ns_params);
        if (err)
                goto free_nsdata;

        err = rhashtable_init(&nsdata->schemas, &rht_sc_params);
        if (err)
                goto free_rht_ns;

out:
        return err;
free_rht_ns:
        rhashtable_destroy(&nsdata->namespaces);
free_nsdata:
        kfree(nsdata);
        net->ipv6.ioam6_data = NULL;
        goto out;
}
927
/* Per-netns teardown: destroy both hashtables, releasing every remaining
 * namespace/schema via the RCU-deferred free callbacks, then free the
 * pernet data itself.
 */
static void __net_exit ioam6_net_exit(struct net *net)
{
        struct ioam6_pernet_data *nsdata = ioam6_pernet(net);

        rhashtable_free_and_destroy(&nsdata->namespaces, ioam6_free_ns, NULL);
        rhashtable_free_and_destroy(&nsdata->schemas, ioam6_free_sc, NULL);

        kfree(nsdata);
}
937
/* Hooks run on network-namespace creation/destruction. */
static struct pernet_operations ioam6_net_ops = {
        .init = ioam6_net_init,
        .exit = ioam6_net_exit,
};
942
/* Module init: register the pernet subsystem, the generic netlink
 * family, and (when configured) the IOAM lightweight tunnel.  Each step
 * unwinds the previous ones on failure.
 */
int __init ioam6_init(void)
{
        int err = register_pernet_subsys(&ioam6_net_ops);
        if (err)
                goto out;

        err = genl_register_family(&ioam6_genl_family);
        if (err)
                goto out_unregister_pernet_subsys;

#ifdef CONFIG_IPV6_IOAM6_LWTUNNEL
        err = ioam6_iptunnel_init();
        if (err)
                goto out_unregister_genl;
#endif

        pr_info("In-situ OAM (IOAM) with IPv6\n");

out:
        return err;
#ifdef CONFIG_IPV6_IOAM6_LWTUNNEL
out_unregister_genl:
        genl_unregister_family(&ioam6_genl_family);
#endif
out_unregister_pernet_subsys:
        unregister_pernet_subsys(&ioam6_net_ops);
        goto out;
}
971
/* Module exit: unregister in the reverse order of ioam6_init(). */
void ioam6_exit(void)
{
#ifdef CONFIG_IPV6_IOAM6_LWTUNNEL
        ioam6_iptunnel_exit();
#endif
        genl_unregister_family(&ioam6_genl_family);
        unregister_pernet_subsys(&ioam6_net_ops);
}
980