// SPDX-License-Identifier: GPL-2.0+
/*
 * IPv6 IOAM Lightweight Tunnel implementation
 *
 * Author:
 * Justin Iurman <justin.iurman@uliege.be>
 */

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/netlink.h>
#include <linux/in6.h>
#include <linux/ioam6.h>
#include <linux/ioam6_iptunnel.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/lwtunnel.h>
#include <net/ioam6.h>

#define IOAM6_MASK_SHORT_FIELDS 0xff100000
#define IOAM6_MASK_WIDE_FIELDS 0xe00000

struct ioam6_lwt_encap {
	struct ipv6_hopopt_hdr eh;
	u8 pad[2];	/* 2-octet padding for 4n-alignment */
	struct ioam6_hdr ioamh;
	struct ioam6_trace_hdr traceh;
} __packed;

struct ioam6_lwt {
	struct ioam6_lwt_encap tuninfo;
};

static struct ioam6_lwt *ioam6_lwt_state(struct lwtunnel_state *lwt)
{
	return (struct ioam6_lwt *)lwt->data;
}

static struct ioam6_lwt_encap *ioam6_lwt_info(struct lwtunnel_state *lwt)
{
	return &ioam6_lwt_state(lwt)->tuninfo;
}

static struct ioam6_trace_hdr *ioam6_trace(struct lwtunnel_state *lwt)
{
	return &(ioam6_lwt_state(lwt)->tuninfo.traceh);
}

static const struct nla_policy ioam6_iptunnel_policy[IOAM6_IPTUNNEL_MAX + 1] = {
	[IOAM6_IPTUNNEL_TRACE] = NLA_POLICY_EXACT_LEN(sizeof(struct ioam6_trace_hdr)),
};

static int nla_put_ioam6_trace(struct sk_buff *skb, int attrtype,
			       struct ioam6_trace_hdr *trace)
{
	struct ioam6_trace_hdr *data;
	struct nlattr *nla;
	int len;

	len = sizeof(*trace);

	nla = nla_reserve(skb, attrtype, len);
	if (!nla)
		return -EMSGSIZE;

	data = nla_data(nla);
	memcpy(data, trace, len);

	return 0;
}
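/* The trace type is a bitmap left-aligned in a 32-bit word: bits 0-7
 * and 11 (IOAM6_MASK_SHORT_FIELDS) select 4-octet fields, bits 8-10
 * (IOAM6_MASK_WIDE_FIELDS) select 8-octet fields. Both nodelen and
 * remlen are expressed in 4-octet units. Illustrative example: a trace
 * type with bits 0, 1 and 10 set selects two short fields and one wide
 * field, hence nodelen = 2 * 1 + 1 * 2 = 4, i.e. each IOAM node on the
 * path inserts 16 octets of trace data.
 */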
static bool ioam6_validate_trace_hdr(struct ioam6_trace_hdr *trace)
{
	u32 fields;

	if (!trace->type_be32 || !trace->remlen ||
	    trace->remlen > IOAM6_TRACE_DATA_SIZE_MAX / 4)
		return false;

	trace->nodelen = 0;
	fields = be32_to_cpu(trace->type_be32);

	trace->nodelen += hweight32(fields & IOAM6_MASK_SHORT_FIELDS)
			  * (sizeof(__be32) / 4);
	trace->nodelen += hweight32(fields & IOAM6_MASK_WIDE_FIELDS)
			  * (sizeof(__be64) / 4);

	return true;
}

static int ioam6_build_state(struct net *net, struct nlattr *nla,
			     unsigned int family, const void *cfg,
			     struct lwtunnel_state **ts,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IOAM6_IPTUNNEL_MAX + 1];
	struct ioam6_lwt_encap *tuninfo;
	struct ioam6_trace_hdr *trace;
	struct lwtunnel_state *s;
	int len_aligned;
	int len, err;

	if (family != AF_INET6)
		return -EINVAL;

	err = nla_parse_nested(tb, IOAM6_IPTUNNEL_MAX, nla,
			       ioam6_iptunnel_policy, extack);
	if (err < 0)
		return err;

	if (!tb[IOAM6_IPTUNNEL_TRACE]) {
		NL_SET_ERR_MSG(extack, "missing trace");
		return -EINVAL;
	}

	trace = nla_data(tb[IOAM6_IPTUNNEL_TRACE]);
	if (!ioam6_validate_trace_hdr(trace)) {
		NL_SET_ERR_MSG_ATTR(extack, tb[IOAM6_IPTUNNEL_TRACE],
				    "invalid trace header");
		return -EINVAL;
	}

	len = sizeof(*tuninfo) + trace->remlen * 4;
	len_aligned = ALIGN(len, 8);

	s = lwtunnel_state_alloc(len_aligned);
	if (!s)
		return -ENOMEM;

	tuninfo = ioam6_lwt_info(s);
	tuninfo->eh.hdrlen = (len_aligned >> 3) - 1;
	tuninfo->pad[0] = IPV6_TLV_PADN;
	tuninfo->ioamh.type = IOAM6_TYPE_PREALLOC;
	tuninfo->ioamh.opt_type = IPV6_TLV_IOAM;
	tuninfo->ioamh.opt_len = sizeof(tuninfo->ioamh) - 2 + sizeof(*trace)
				 + trace->remlen * 4;

	memcpy(&tuninfo->traceh, trace, sizeof(*trace));

	/* pad the encap with a trailing Pad1/PadN to reach 8n octets */
	len = len_aligned - len;
	if (len == 1) {
		tuninfo->traceh.data[trace->remlen * 4] = IPV6_TLV_PAD1;
	} else if (len > 0) {
		tuninfo->traceh.data[trace->remlen * 4] = IPV6_TLV_PADN;
		tuninfo->traceh.data[trace->remlen * 4 + 1] = len - 2;
	}

	s->type = LWTUNNEL_ENCAP_IOAM6;
	s->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;

	*ts = s;

	return 0;
}
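/* Insert the pre-built encap between the IPv6 header and its payload.
 * Sketch of the resulting Hop-by-Hop extension header (sizes in
 * octets; the trailing Pad1/PadN is only present when 8n-alignment
 * requires it):
 *
 *	+------------+------+-----------------+-----------+------------+
 *	| hopopt hdr | PadN | IOAM option hdr | trace hdr | trace data |
 *	|    (2)     | (2)  |       (4)       |    (8)    | remlen * 4 |
 *	+------------+------+-----------------+-----------+------------+
 */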
static int ioam6_do_inline(struct sk_buff *skb, struct ioam6_lwt_encap *tuninfo)
{
	struct ioam6_trace_hdr *trace;
	struct ipv6hdr *oldhdr, *hdr;
	struct ioam6_namespace *ns;
	int hdrlen, err;

	hdrlen = (tuninfo->eh.hdrlen + 1) << 3;

	err = skb_cow_head(skb, hdrlen + skb->mac_len);
	if (unlikely(err))
		return err;

	oldhdr = ipv6_hdr(skb);
	skb_pull(skb, sizeof(*oldhdr));
	skb_postpull_rcsum(skb, skb_network_header(skb), sizeof(*oldhdr));

	skb_push(skb, sizeof(*oldhdr) + hdrlen);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	hdr = ipv6_hdr(skb);
	memmove(hdr, oldhdr, sizeof(*oldhdr));
	tuninfo->eh.nexthdr = hdr->nexthdr;

	skb_set_transport_header(skb, sizeof(*hdr));
	skb_postpush_rcsum(skb, hdr, sizeof(*hdr) + hdrlen);

	memcpy(skb_transport_header(skb), (u8 *)tuninfo, hdrlen);

	hdr->nexthdr = NEXTHDR_HOP;
	hdr->payload_len = cpu_to_be16(skb->len - sizeof(*hdr));

	trace = (struct ioam6_trace_hdr *)(skb_transport_header(skb)
					   + sizeof(struct ipv6_hopopt_hdr) + 2
					   + sizeof(struct ioam6_hdr));

	ns = ioam6_namespace(dev_net(skb_dst(skb)->dev), trace->namespace_id);
	if (ns)
		ioam6_fill_trace_data(skb, ns, trace);

	return 0;
}

static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct lwtunnel_state *lwt = skb_dst(skb)->lwtstate;
	int err = -EINVAL;

	if (skb->protocol != htons(ETH_P_IPV6))
		goto drop;

	/* Only for packets we send and that do not already contain
	 * a Hop-by-Hop header
	 */
	if (skb->dev || ipv6_hdr(skb)->nexthdr == NEXTHDR_HOP)
		goto out;

	err = ioam6_do_inline(skb, ioam6_lwt_info(lwt));
	if (unlikely(err))
		goto drop;

	err = skb_cow_head(skb, LL_RESERVED_SPACE(skb_dst(skb)->dev));
	if (unlikely(err))
		goto drop;

out:
	return lwt->orig_output(net, sk, skb);

drop:
	kfree_skb(skb);
	return err;
}

static int ioam6_fill_encap_info(struct sk_buff *skb,
				 struct lwtunnel_state *lwtstate)
{
	struct ioam6_trace_hdr *trace = ioam6_trace(lwtstate);

	if (nla_put_ioam6_trace(skb, IOAM6_IPTUNNEL_TRACE, trace))
		return -EMSGSIZE;

	return 0;
}

static int ioam6_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	struct ioam6_trace_hdr *trace = ioam6_trace(lwtstate);

	return nla_total_size(sizeof(*trace));
}

static int ioam6_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	struct ioam6_trace_hdr *a_hdr = ioam6_trace(a);
	struct ioam6_trace_hdr *b_hdr = ioam6_trace(b);

	return (a_hdr->namespace_id != b_hdr->namespace_id);
}

static const struct lwtunnel_encap_ops ioam6_iptun_ops = {
	.build_state	= ioam6_build_state,
	.output		= ioam6_output,
	.fill_encap	= ioam6_fill_encap_info,
	.get_encap_size	= ioam6_encap_nlsize,
	.cmp_encap	= ioam6_encap_cmp,
	.owner		= THIS_MODULE,
};

int __init ioam6_iptunnel_init(void)
{
	return lwtunnel_encap_add_ops(&ioam6_iptun_ops, LWTUNNEL_ENCAP_IOAM6);
}

void ioam6_iptunnel_exit(void)
{
	lwtunnel_encap_del_ops(&ioam6_iptun_ops, LWTUNNEL_ENCAP_IOAM6);
}
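/* Example configuration with a matching iproute2 (an illustrative
 * sketch; the exact encap syntax depends on the iproute2 version, and
 * db02::/64, ns 1 and eth0 are placeholder values):
 *
 *	ip -6 route add db02::/64 encap ioam6 trace type 0xc02000 \
 *		ns 1 size 16 dev eth0
 *
 * where "size" is the pre-allocated trace data area in octets
 * (remlen * 4) and must hold nodelen * 4 octets for every traversed
 * IOAM node, and where IOAM namespace 1 is assumed to exist already.
 */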