// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_vlan.h>
#include <net/tc_act/tc_vlan.h>

static unsigned int vlan_net_id;
static struct tc_action_ops act_vlan_ops;

static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
                        struct tcf_result *res)
{
        struct tcf_vlan *v = to_vlan(a);
        struct tcf_vlan_params *p;
        int action;
        int err;
        u16 tci;

        tcf_lastuse_update(&v->tcf_tm);
        tcf_action_update_bstats(&v->common, skb);

        /* Ensure 'data' points at mac_header prior calling vlan manipulating
         * functions.
         */
        if (skb_at_tc_ingress(skb))
                skb_push_rcsum(skb, skb->mac_len);

        action = READ_ONCE(v->tcf_action);

        p = rcu_dereference_bh(v->vlan_p);

        switch (p->tcfv_action) {
        case TCA_VLAN_ACT_POP:
                err = skb_vlan_pop(skb);
                if (err)
                        goto drop;
                break;
        case TCA_VLAN_ACT_PUSH:
                err = skb_vlan_push(skb, p->tcfv_push_proto, p->tcfv_push_vid |
                                    (p->tcfv_push_prio << VLAN_PRIO_SHIFT));
                if (err)
                        goto drop;
                break;
        case TCA_VLAN_ACT_MODIFY:
                /* No-op if no vlan tag (either hw-accel or in-payload) */
                if (!skb_vlan_tagged(skb))
                        goto out;
                /* extract existing tag (and guarantee no hw-accel tag) */
                if (skb_vlan_tag_present(skb)) {
                        tci = skb_vlan_tag_get(skb);
                        __vlan_hwaccel_clear_tag(skb);
                } else {
                        /* in-payload vlan tag, pop it */
                        err = __skb_vlan_pop(skb, &tci);
                        if (err)
                                goto drop;
                }
                /* replace the vid */
                tci = (tci & ~VLAN_VID_MASK) | p->tcfv_push_vid;
                /* replace prio bits, if tcfv_push_prio specified */
                if (p->tcfv_push_prio_exists) {
                        tci &= ~VLAN_PRIO_MASK;
                        tci |= p->tcfv_push_prio << VLAN_PRIO_SHIFT;
                }
                /* put updated tci as hwaccel tag */
                __vlan_hwaccel_put_tag(skb, p->tcfv_push_proto, tci);
                break;
        case TCA_VLAN_ACT_POP_ETH:
                err = skb_eth_pop(skb);
                if (err)
                        goto drop;
                break;
        case TCA_VLAN_ACT_PUSH_ETH:
                err = skb_eth_push(skb, p->tcfv_push_dst, p->tcfv_push_src);
                if (err)
                        goto drop;
                break;
        default:
                BUG();
        }

out:
        if (skb_at_tc_ingress(skb))
                skb_pull_rcsum(skb, skb->mac_len);

        return action;

drop:
        tcf_action_inc_drop_qstats(&v->common);
        return TC_ACT_SHOT;
}

static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
        [TCA_VLAN_UNSPEC] = { .strict_start_type = TCA_VLAN_PUSH_ETH_DST },
        [TCA_VLAN_PARMS] = { .len = sizeof(struct tc_vlan) },
        [TCA_VLAN_PUSH_VLAN_ID] = { .type = NLA_U16 },
        [TCA_VLAN_PUSH_VLAN_PROTOCOL] = { .type = NLA_U16 },
        [TCA_VLAN_PUSH_VLAN_PRIORITY] = { .type = NLA_U8 },
        [TCA_VLAN_PUSH_ETH_DST] = NLA_POLICY_ETH_ADDR,
        [TCA_VLAN_PUSH_ETH_SRC] = NLA_POLICY_ETH_ADDR,
};
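
/* Example userspace configuration (a sketch; assumes iproute2's "tc" syntax
 * for this action and that a clsact/ingress qdisc is already installed).
 * These commands end up filling the TCA_VLAN_* attributes validated by the
 * policy above:
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           action vlan push id 5 protocol 802.1q priority 3
 *   tc filter add dev eth0 ingress protocol 802.1q flower \
 *           action vlan pop
 */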

static int tcf_vlan_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
                         struct tcf_proto *tp, u32 flags,
                         struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, vlan_net_id);
        bool bind = flags & TCA_ACT_FLAGS_BIND;
        struct nlattr *tb[TCA_VLAN_MAX + 1];
        struct tcf_chain *goto_ch = NULL;
        bool push_prio_exists = false;
        struct tcf_vlan_params *p;
        struct tc_vlan *parm;
        struct tcf_vlan *v;
        int action;
        u16 push_vid = 0;
        __be16 push_proto = 0;
        u8 push_prio = 0;
        bool exists = false;
        int ret = 0, err;
        u32 index;

        if (!nla)
                return -EINVAL;

        err = nla_parse_nested_deprecated(tb, TCA_VLAN_MAX, nla, vlan_policy,
                                          NULL);
        if (err < 0)
                return err;

        if (!tb[TCA_VLAN_PARMS])
                return -EINVAL;
        parm = nla_data(tb[TCA_VLAN_PARMS]);
        index = parm->index;
        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)
                return err;
        exists = err;
        if (exists && bind)
                return 0;

        switch (parm->v_action) {
        case TCA_VLAN_ACT_POP:
                break;
        case TCA_VLAN_ACT_PUSH:
        case TCA_VLAN_ACT_MODIFY:
                if (!tb[TCA_VLAN_PUSH_VLAN_ID]) {
                        if (exists)
                                tcf_idr_release(*a, bind);
                        else
                                tcf_idr_cleanup(tn, index);
                        return -EINVAL;
                }
                push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
                if (push_vid >= VLAN_VID_MASK) {
                        if (exists)
                                tcf_idr_release(*a, bind);
                        else
                                tcf_idr_cleanup(tn, index);
                        return -ERANGE;
                }

                if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) {
                        push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]);
                        switch (push_proto) {
                        case htons(ETH_P_8021Q):
                        case htons(ETH_P_8021AD):
                                break;
                        default:
                                if (exists)
                                        tcf_idr_release(*a, bind);
                                else
                                        tcf_idr_cleanup(tn, index);
                                return -EPROTONOSUPPORT;
                        }
                } else {
                        push_proto = htons(ETH_P_8021Q);
                }

                push_prio_exists = !!tb[TCA_VLAN_PUSH_VLAN_PRIORITY];
                if (push_prio_exists)
                        push_prio = nla_get_u8(tb[TCA_VLAN_PUSH_VLAN_PRIORITY]);
                break;
        case TCA_VLAN_ACT_POP_ETH:
                break;
        case TCA_VLAN_ACT_PUSH_ETH:
                if (!tb[TCA_VLAN_PUSH_ETH_DST] || !tb[TCA_VLAN_PUSH_ETH_SRC]) {
                        if (exists)
                                tcf_idr_release(*a, bind);
                        else
                                tcf_idr_cleanup(tn, index);
                        return -EINVAL;
                }
                break;
        default:
                if (exists)
                        tcf_idr_release(*a, bind);
                else
                        tcf_idr_cleanup(tn, index);
                return -EINVAL;
        }
        action = parm->v_action;

        if (!exists) {
                ret = tcf_idr_create_from_flags(tn, index, est, a,
                                                &act_vlan_ops, bind, flags);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
                }

                ret = ACT_P_CREATED;
        } else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }

        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto release_idr;

        v = to_vlan(*a);

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p) {
                err = -ENOMEM;
                goto put_chain;
        }

        p->tcfv_action = action;
        p->tcfv_push_vid = push_vid;
        p->tcfv_push_prio = push_prio;
        p->tcfv_push_prio_exists = push_prio_exists || action == TCA_VLAN_ACT_PUSH;
        p->tcfv_push_proto = push_proto;

        if (action == TCA_VLAN_ACT_PUSH_ETH) {
                nla_memcpy(&p->tcfv_push_dst, tb[TCA_VLAN_PUSH_ETH_DST],
                           ETH_ALEN);
                nla_memcpy(&p->tcfv_push_src, tb[TCA_VLAN_PUSH_ETH_SRC],
                           ETH_ALEN);
        }

        spin_lock_bh(&v->tcf_lock);
        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        p = rcu_replace_pointer(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
        spin_unlock_bh(&v->tcf_lock);

        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
        if (p)
                kfree_rcu(p, rcu);

        return ret;
put_chain:
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
release_idr:
        tcf_idr_release(*a, bind);
        return err;
}
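
/* Action teardown: the parameter block installed by tcf_vlan_init() is
 * RCU-managed, so it is released with kfree_rcu() once no datapath reader
 * obtained via rcu_dereference_bh() can still be using it.
 */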
static void tcf_vlan_cleanup(struct tc_action *a)
{
        struct tcf_vlan *v = to_vlan(a);
        struct tcf_vlan_params *p;

        p = rcu_dereference_protected(v->vlan_p, 1);
        if (p)
                kfree_rcu(p, rcu);
}

static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
                         int bind, int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_vlan *v = to_vlan(a);
        struct tcf_vlan_params *p;
        struct tc_vlan opt = {
                .index    = v->tcf_index,
                .refcnt   = refcount_read(&v->tcf_refcnt) - ref,
                .bindcnt  = atomic_read(&v->tcf_bindcnt) - bind,
        };
        struct tcf_t t;

        spin_lock_bh(&v->tcf_lock);
        opt.action = v->tcf_action;
        p = rcu_dereference_protected(v->vlan_p, lockdep_is_held(&v->tcf_lock));
        opt.v_action = p->tcfv_action;
        if (nla_put(skb, TCA_VLAN_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        if ((p->tcfv_action == TCA_VLAN_ACT_PUSH ||
             p->tcfv_action == TCA_VLAN_ACT_MODIFY) &&
            (nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, p->tcfv_push_vid) ||
             nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL,
                          p->tcfv_push_proto) ||
             (p->tcfv_push_prio_exists &&
              nla_put_u8(skb, TCA_VLAN_PUSH_VLAN_PRIORITY, p->tcfv_push_prio))))
                goto nla_put_failure;

        if (p->tcfv_action == TCA_VLAN_ACT_PUSH_ETH) {
                if (nla_put(skb, TCA_VLAN_PUSH_ETH_DST, ETH_ALEN,
                            p->tcfv_push_dst))
                        goto nla_put_failure;
                if (nla_put(skb, TCA_VLAN_PUSH_ETH_SRC, ETH_ALEN,
                            p->tcfv_push_src))
                        goto nla_put_failure;
        }

        tcf_tm_dump(&t, &v->tcf_tm);
        if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD))
                goto nla_put_failure;
        spin_unlock_bh(&v->tcf_lock);

        return skb->len;

nla_put_failure:
        spin_unlock_bh(&v->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
}

static int tcf_vlan_walker(struct net *net, struct sk_buff *skb,
                           struct netlink_callback *cb, int type,
                           const struct tc_action_ops *ops,
                           struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, vlan_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static void tcf_vlan_stats_update(struct tc_action *a, u64 bytes, u64 packets,
                                  u64 drops, u64 lastuse, bool hw)
{
        struct tcf_vlan *v = to_vlan(a);
        struct tcf_t *tm = &v->tcf_tm;

        tcf_action_update_stats(a, bytes, packets, drops, hw);
        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, vlan_net_id);

        return tcf_idr_search(tn, a, index);
}

static size_t tcf_vlan_get_fill_size(const struct tc_action *act)
{
        return nla_total_size(sizeof(struct tc_vlan))
                + nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_ID */
                + nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_PROTOCOL */
                + nla_total_size(sizeof(u8)); /* TCA_VLAN_PUSH_VLAN_PRIORITY */
}
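
/* Hardware offload: translate the software action into a flow_action entry.
 * With bind == true the full FLOW_ACTION_VLAN_* entry (vid, proto, prio or
 * MAC addresses) is filled in for the driver; otherwise only the action id
 * is reported so drivers can pre-validate support.
 */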
static int tcf_vlan_offload_act_setup(struct tc_action *act, void *entry_data,
                                      u32 *index_inc, bool bind,
                                      struct netlink_ext_ack *extack)
{
        if (bind) {
                struct flow_action_entry *entry = entry_data;

                switch (tcf_vlan_action(act)) {
                case TCA_VLAN_ACT_PUSH:
                        entry->id = FLOW_ACTION_VLAN_PUSH;
                        entry->vlan.vid = tcf_vlan_push_vid(act);
                        entry->vlan.proto = tcf_vlan_push_proto(act);
                        entry->vlan.prio = tcf_vlan_push_prio(act);
                        break;
                case TCA_VLAN_ACT_POP:
                        entry->id = FLOW_ACTION_VLAN_POP;
                        break;
                case TCA_VLAN_ACT_MODIFY:
                        entry->id = FLOW_ACTION_VLAN_MANGLE;
                        entry->vlan.vid = tcf_vlan_push_vid(act);
                        entry->vlan.proto = tcf_vlan_push_proto(act);
                        entry->vlan.prio = tcf_vlan_push_prio(act);
                        break;
                case TCA_VLAN_ACT_POP_ETH:
                        entry->id = FLOW_ACTION_VLAN_POP_ETH;
                        break;
                case TCA_VLAN_ACT_PUSH_ETH:
                        entry->id = FLOW_ACTION_VLAN_PUSH_ETH;
                        tcf_vlan_push_eth(entry->vlan_push_eth.src, entry->vlan_push_eth.dst, act);
                        break;
                default:
                        NL_SET_ERR_MSG_MOD(extack, "Unsupported vlan action mode offload");
                        return -EOPNOTSUPP;
                }
                *index_inc = 1;
        } else {
                struct flow_offload_action *fl_action = entry_data;

                switch (tcf_vlan_action(act)) {
                case TCA_VLAN_ACT_PUSH:
                        fl_action->id = FLOW_ACTION_VLAN_PUSH;
                        break;
                case TCA_VLAN_ACT_POP:
                        fl_action->id = FLOW_ACTION_VLAN_POP;
                        break;
                case TCA_VLAN_ACT_MODIFY:
                        fl_action->id = FLOW_ACTION_VLAN_MANGLE;
                        break;
                case TCA_VLAN_ACT_POP_ETH:
                        fl_action->id = FLOW_ACTION_VLAN_POP_ETH;
                        break;
                case TCA_VLAN_ACT_PUSH_ETH:
                        fl_action->id = FLOW_ACTION_VLAN_PUSH_ETH;
                        break;
                default:
                        return -EOPNOTSUPP;
                }
        }

        return 0;
}

static struct tc_action_ops act_vlan_ops = {
        .kind                   = "vlan",
        .id                     = TCA_ID_VLAN,
        .owner                  = THIS_MODULE,
        .act                    = tcf_vlan_act,
        .dump                   = tcf_vlan_dump,
        .init                   = tcf_vlan_init,
        .cleanup                = tcf_vlan_cleanup,
        .walk                   = tcf_vlan_walker,
        .stats_update           = tcf_vlan_stats_update,
        .get_fill_size          = tcf_vlan_get_fill_size,
        .lookup                 = tcf_vlan_search,
        .offload_act_setup      = tcf_vlan_offload_act_setup,
        .size                   = sizeof(struct tcf_vlan),
};

static __net_init int vlan_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, vlan_net_id);

        return tc_action_net_init(net, tn, &act_vlan_ops);
}

static void __net_exit vlan_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, vlan_net_id);
}

static struct pernet_operations vlan_net_ops = {
        .init = vlan_init_net,
        .exit_batch = vlan_exit_net,
        .id   = &vlan_net_id,
        .size = sizeof(struct tc_action_net),
};

static int __init vlan_init_module(void)
{
        return tcf_register_action(&act_vlan_ops, &vlan_net_ops);
}

static void __exit vlan_cleanup_module(void)
{
        tcf_unregister_action(&act_vlan_ops, &vlan_net_ops);
}

module_init(vlan_init_module);
module_exit(vlan_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("vlan manipulation actions");
MODULE_LICENSE("GPL v2");