xref: /openbmc/linux/net/ipv6/ioam6_iptunnel.c (revision b9221f71)
// SPDX-License-Identifier: GPL-2.0+
/*
 *  IPv6 IOAM Lightweight Tunnel implementation
 *
 *  Author:
 *  Justin Iurman <justin.iurman@uliege.be>
 */

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/netlink.h>
#include <linux/in6.h>
#include <linux/ioam6.h>
#include <linux/ioam6_iptunnel.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/lwtunnel.h>
#include <net/ioam6.h>

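/*
 * Editor's note (not in the original source): these masks select, on the
 * host-order value of the 24-bit trace type (stored in the upper bits of
 * type_be32), the bits that correspond to 4-octet ("short") and 8-octet
 * ("wide") node data fields. They are used below to derive nodelen.
 */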
#define IOAM6_MASK_SHORT_FIELDS 0xff100000
#define IOAM6_MASK_WIDE_FIELDS 0xe00000

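/*
 * Pre-built encapsulation template: a Hop-by-Hop header, a 2-octet PadN
 * option (4n alignment of the IOAM option), and the pre-allocated IOAM
 * trace option. The trace data area is allocated right behind this struct.
 */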
struct ioam6_lwt_encap {
	struct ipv6_hopopt_hdr	eh;
	u8			pad[2];	/* 2-octet padding for 4n-alignment */
	struct ioam6_hdr	ioamh;
	struct ioam6_trace_hdr	traceh;
} __packed;

struct ioam6_lwt {
	struct ioam6_lwt_encap	tuninfo;
};

static struct ioam6_lwt *ioam6_lwt_state(struct lwtunnel_state *lwt)
{
	return (struct ioam6_lwt *)lwt->data;
}

static struct ioam6_lwt_encap *ioam6_lwt_info(struct lwtunnel_state *lwt)
{
	return &ioam6_lwt_state(lwt)->tuninfo;
}

static struct ioam6_trace_hdr *ioam6_trace(struct lwtunnel_state *lwt)
{
	return &(ioam6_lwt_state(lwt)->tuninfo.traceh);
}

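/* Netlink policy: the only attribute is a fixed-size trace header. */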
static const struct nla_policy ioam6_iptunnel_policy[IOAM6_IPTUNNEL_MAX + 1] = {
	[IOAM6_IPTUNNEL_TRACE]	= NLA_POLICY_EXACT_LEN(sizeof(struct ioam6_trace_hdr)),
};

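/* Dump the configured trace header as a netlink attribute. */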
static int nla_put_ioam6_trace(struct sk_buff *skb, int attrtype,
			       struct ioam6_trace_hdr *trace)
{
	struct ioam6_trace_hdr *data;
	struct nlattr *nla;
	int len;

	len = sizeof(*trace);

	nla = nla_reserve(skb, attrtype, len);
	if (!nla)
		return -EMSGSIZE;

	data = nla_data(nla);
	memcpy(data, trace, len);

	return 0;
}

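/*
 * Reject empty or oversized traces and any of the (unsupported) trace type
 * bits 12-21, then derive nodelen, the per-node data length in 4-octet
 * units, from the requested fields.
 */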
static bool ioam6_validate_trace_hdr(struct ioam6_trace_hdr *trace)
{
	u32 fields;

	if (!trace->type_be32 || !trace->remlen ||
	    trace->remlen > IOAM6_TRACE_DATA_SIZE_MAX / 4 ||
	    trace->type.bit12 | trace->type.bit13 | trace->type.bit14 |
	    trace->type.bit15 | trace->type.bit16 | trace->type.bit17 |
	    trace->type.bit18 | trace->type.bit19 | trace->type.bit20 |
	    trace->type.bit21)
		return false;

	trace->nodelen = 0;
	fields = be32_to_cpu(trace->type_be32);

	trace->nodelen += hweight32(fields & IOAM6_MASK_SHORT_FIELDS)
				* (sizeof(__be32) / 4);
	trace->nodelen += hweight32(fields & IOAM6_MASK_WIDE_FIELDS)
				* (sizeof(__be64) / 4);

	return true;
}

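/*
 * Parse the netlink configuration, validate the trace and build the
 * encapsulation template. The total encap length is padded to a multiple of
 * 8 octets (Pad1/PadN appended after the trace data), as required for an
 * IPv6 extension header, and the dst output is redirected to ioam6_output().
 */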
static int ioam6_build_state(struct net *net, struct nlattr *nla,
			     unsigned int family, const void *cfg,
			     struct lwtunnel_state **ts,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IOAM6_IPTUNNEL_MAX + 1];
	struct ioam6_lwt_encap *tuninfo;
	struct ioam6_trace_hdr *trace;
	struct lwtunnel_state *s;
	int len_aligned;
	int len, err;

	if (family != AF_INET6)
		return -EINVAL;

	err = nla_parse_nested(tb, IOAM6_IPTUNNEL_MAX, nla,
			       ioam6_iptunnel_policy, extack);
	if (err < 0)
		return err;

	if (!tb[IOAM6_IPTUNNEL_TRACE]) {
		NL_SET_ERR_MSG(extack, "missing trace");
		return -EINVAL;
	}

	trace = nla_data(tb[IOAM6_IPTUNNEL_TRACE]);
	if (!ioam6_validate_trace_hdr(trace)) {
		NL_SET_ERR_MSG_ATTR(extack, tb[IOAM6_IPTUNNEL_TRACE],
				    "invalid trace validation");
		return -EINVAL;
	}

	len = sizeof(*tuninfo) + trace->remlen * 4;
	len_aligned = ALIGN(len, 8);

	s = lwtunnel_state_alloc(len_aligned);
	if (!s)
		return -ENOMEM;

	tuninfo = ioam6_lwt_info(s);
	tuninfo->eh.hdrlen = (len_aligned >> 3) - 1;
	tuninfo->pad[0] = IPV6_TLV_PADN;
	tuninfo->ioamh.type = IOAM6_TYPE_PREALLOC;
	tuninfo->ioamh.opt_type = IPV6_TLV_IOAM;
	tuninfo->ioamh.opt_len = sizeof(tuninfo->ioamh) - 2 + sizeof(*trace)
					+ trace->remlen * 4;

	memcpy(&tuninfo->traceh, trace, sizeof(*trace));

	len = len_aligned - len;
	if (len == 1) {
		tuninfo->traceh.data[trace->remlen * 4] = IPV6_TLV_PAD1;
	} else if (len > 0) {
		tuninfo->traceh.data[trace->remlen * 4] = IPV6_TLV_PADN;
		tuninfo->traceh.data[trace->remlen * 4 + 1] = len - 2;
	}

	s->type = LWTUNNEL_ENCAP_IOAM6;
	s->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;

	*ts = s;

	return 0;
}

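/*
 * Insert the pre-built Hop-by-Hop header right after the IPv6 header of an
 * outgoing packet: make headroom, shift the IPv6 header, chain nexthdr and
 * fix payload_len, then fill the trace data for the configured namespace
 * (if that namespace is known on this node).
 */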
static int ioam6_do_inline(struct sk_buff *skb, struct ioam6_lwt_encap *tuninfo)
{
	struct ioam6_trace_hdr *trace;
	struct ipv6hdr *oldhdr, *hdr;
	struct ioam6_namespace *ns;
	int hdrlen, err;

	hdrlen = (tuninfo->eh.hdrlen + 1) << 3;

	err = skb_cow_head(skb, hdrlen + skb->mac_len);
	if (unlikely(err))
		return err;

	oldhdr = ipv6_hdr(skb);
	skb_pull(skb, sizeof(*oldhdr));
	skb_postpull_rcsum(skb, skb_network_header(skb), sizeof(*oldhdr));

	skb_push(skb, sizeof(*oldhdr) + hdrlen);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	hdr = ipv6_hdr(skb);
	memmove(hdr, oldhdr, sizeof(*oldhdr));
	tuninfo->eh.nexthdr = hdr->nexthdr;

	skb_set_transport_header(skb, sizeof(*hdr));
	skb_postpush_rcsum(skb, hdr, sizeof(*hdr) + hdrlen);

	memcpy(skb_transport_header(skb), (u8 *)tuninfo, hdrlen);

	hdr->nexthdr = NEXTHDR_HOP;
	hdr->payload_len = cpu_to_be16(skb->len - sizeof(*hdr));

	trace = (struct ioam6_trace_hdr *)(skb_transport_header(skb)
					   + sizeof(struct ipv6_hopopt_hdr) + 2
					   + sizeof(struct ioam6_hdr));

	ns = ioam6_namespace(dev_net(skb_dst(skb)->dev), trace->namespace_id);
	if (ns)
		ioam6_fill_trace_data(skb, ns, trace);

	return 0;
}

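/*
 * lwtunnel output hook: non-IPv6 packets are dropped; locally generated
 * packets (skb->dev == NULL) that do not already carry a Hop-by-Hop header
 * are encapsulated inline, all others go straight to the original output.
 */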
static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct lwtunnel_state *lwt = skb_dst(skb)->lwtstate;
	int err = -EINVAL;

	if (skb->protocol != htons(ETH_P_IPV6))
		goto drop;

	/* Only for packets we send and
	 * that do not contain a Hop-by-Hop yet
	 */
	if (skb->dev || ipv6_hdr(skb)->nexthdr == NEXTHDR_HOP)
		goto out;

	err = ioam6_do_inline(skb, ioam6_lwt_info(lwt));
	if (unlikely(err))
		goto drop;

	err = skb_cow_head(skb, LL_RESERVED_SPACE(skb_dst(skb)->dev));
	if (unlikely(err))
		goto drop;

out:
	return lwt->orig_output(net, sk, skb);

drop:
	kfree_skb(skb);
	return err;
}

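/*
 * Netlink dump/size helpers and state comparison (two IOAM6 lwt states are
 * compared on their trace namespace id only).
 */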
static int ioam6_fill_encap_info(struct sk_buff *skb,
				 struct lwtunnel_state *lwtstate)
{
	struct ioam6_trace_hdr *trace = ioam6_trace(lwtstate);

	if (nla_put_ioam6_trace(skb, IOAM6_IPTUNNEL_TRACE, trace))
		return -EMSGSIZE;

	return 0;
}

static int ioam6_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	struct ioam6_trace_hdr *trace = ioam6_trace(lwtstate);

	return nla_total_size(sizeof(*trace));
}

static int ioam6_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	struct ioam6_trace_hdr *a_hdr = ioam6_trace(a);
	struct ioam6_trace_hdr *b_hdr = ioam6_trace(b);

	return (a_hdr->namespace_id != b_hdr->namespace_id);
}

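/* Encap operations registered/unregistered for LWTUNNEL_ENCAP_IOAM6. */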
static const struct lwtunnel_encap_ops ioam6_iptun_ops = {
	.build_state	= ioam6_build_state,
	.output		= ioam6_output,
	.fill_encap	= ioam6_fill_encap_info,
	.get_encap_size	= ioam6_encap_nlsize,
	.cmp_encap	= ioam6_encap_cmp,
	.owner		= THIS_MODULE,
};

int __init ioam6_iptunnel_init(void)
{
	return lwtunnel_encap_add_ops(&ioam6_iptun_ops, LWTUNNEL_ENCAP_IOAM6);
}

void ioam6_iptunnel_exit(void)
{
	lwtunnel_encap_del_ops(&ioam6_iptun_ops, LWTUNNEL_ENCAP_IOAM6);
}