// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_vlan.h>
#include <net/tc_act/tc_vlan.h>

static unsigned int vlan_net_id;
static struct tc_action_ops act_vlan_ops;

static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;
	int action;
	int err;
	u16 tci;

	tcf_lastuse_update(&v->tcf_tm);
	tcf_action_update_bstats(&v->common, skb);

	/* Ensure 'data' points at mac_header prior to calling vlan
	 * manipulating functions.
	 */
	if (skb_at_tc_ingress(skb))
		skb_push_rcsum(skb, skb->mac_len);

	action = READ_ONCE(v->tcf_action);

	p = rcu_dereference_bh(v->vlan_p);

	switch (p->tcfv_action) {
	case TCA_VLAN_ACT_POP:
		err = skb_vlan_pop(skb);
		if (err)
			goto drop;
		break;
	case TCA_VLAN_ACT_PUSH:
		err = skb_vlan_push(skb, p->tcfv_push_proto, p->tcfv_push_vid |
				    (p->tcfv_push_prio << VLAN_PRIO_SHIFT));
		if (err)
			goto drop;
		break;
	case TCA_VLAN_ACT_MODIFY:
		/* No-op if no vlan tag (either hw-accel or in-payload) */
		if (!skb_vlan_tagged(skb))
			goto out;
		/* extract existing tag (and guarantee no hw-accel tag) */
		if (skb_vlan_tag_present(skb)) {
			tci = skb_vlan_tag_get(skb);
			__vlan_hwaccel_clear_tag(skb);
		} else {
			/* in-payload vlan tag, pop it */
			err = __skb_vlan_pop(skb, &tci);
			if (err)
				goto drop;
		}
		/* replace the vid */
		tci = (tci & ~VLAN_VID_MASK) | p->tcfv_push_vid;
		/* replace prio bits, if tcfv_push_prio specified */
		if (p->tcfv_push_prio) {
			tci &= ~VLAN_PRIO_MASK;
			tci |= p->tcfv_push_prio << VLAN_PRIO_SHIFT;
		}
		/* put updated tci as hwaccel tag */
		__vlan_hwaccel_put_tag(skb, p->tcfv_push_proto, tci);
		break;
	default:
		BUG();
	}

out:
	if (skb_at_tc_ingress(skb))
		skb_pull_rcsum(skb, skb->mac_len);

	return action;

drop:
	tcf_action_inc_drop_qstats(&v->common);
	return TC_ACT_SHOT;
}

static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
	[TCA_VLAN_PARMS]		= { .len = sizeof(struct tc_vlan) },
	[TCA_VLAN_PUSH_VLAN_ID]		= { .type = NLA_U16 },
	[TCA_VLAN_PUSH_VLAN_PROTOCOL]	= { .type = NLA_U16 },
	[TCA_VLAN_PUSH_VLAN_PRIORITY]	= { .type = NLA_U8 },
};

static int tcf_vlan_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 int ovr, int bind, bool rtnl_held,
			 struct tcf_proto *tp, u32 flags,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);
	struct nlattr *tb[TCA_VLAN_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tcf_vlan_params *p;
	struct tc_vlan *parm;
	struct tcf_vlan *v;
	int action;
	u16 push_vid = 0;
	__be16 push_proto = 0;
	u8 push_prio = 0;
	bool exists = false;
	int ret = 0, err;
	u32 index;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_VLAN_MAX, nla, vlan_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_VLAN_PARMS])
		return -EINVAL;
	parm = nla_data(tb[TCA_VLAN_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->v_action) {
	case TCA_VLAN_ACT_POP:
		break;
	case TCA_VLAN_ACT_PUSH:
	case TCA_VLAN_ACT_MODIFY:
		if (!tb[TCA_VLAN_PUSH_VLAN_ID]) {
			if (exists)
				tcf_idr_release(*a, bind);
			else
				tcf_idr_cleanup(tn, index);
			return -EINVAL;
		}
		push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
		if (push_vid >= VLAN_VID_MASK) {
			if (exists)
				tcf_idr_release(*a, bind);
			else
				tcf_idr_cleanup(tn, index);
			return -ERANGE;
		}

		if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) {
			push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]);
			switch (push_proto) {
			case htons(ETH_P_8021Q):
			case htons(ETH_P_8021AD):
				break;
			default:
				if (exists)
					tcf_idr_release(*a, bind);
				else
					tcf_idr_cleanup(tn, index);
				return -EPROTONOSUPPORT;
			}
		} else {
			push_proto = htons(ETH_P_8021Q);
		}

		if (tb[TCA_VLAN_PUSH_VLAN_PRIORITY])
			push_prio = nla_get_u8(tb[TCA_VLAN_PUSH_VLAN_PRIORITY]);
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}
	action = parm->v_action;

	if (!exists) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_vlan_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	v = to_vlan(*a);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		err = -ENOMEM;
		goto put_chain;
	}

	p->tcfv_action = action;
	p->tcfv_push_vid = push_vid;
	p->tcfv_push_prio = push_prio;
	p->tcfv_push_proto = push_proto;

	spin_lock_bh(&v->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	p = rcu_replace_pointer(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
	spin_unlock_bh(&v->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (p)
		kfree_rcu(p, rcu);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_vlan_cleanup(struct tc_action *a)
{
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;

	p = rcu_dereference_protected(v->vlan_p, 1);
	if (p)
		kfree_rcu(p, rcu);
}

static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
			 int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;
	struct tc_vlan opt = {
		.index = v->tcf_index,
		.refcnt = refcount_read(&v->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&v->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&v->tcf_lock);
	opt.action = v->tcf_action;
	p = rcu_dereference_protected(v->vlan_p, lockdep_is_held(&v->tcf_lock));
	opt.v_action = p->tcfv_action;
	if (nla_put(skb, TCA_VLAN_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if ((p->tcfv_action == TCA_VLAN_ACT_PUSH ||
	     p->tcfv_action == TCA_VLAN_ACT_MODIFY) &&
	    (nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, p->tcfv_push_vid) ||
	     nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL,
			  p->tcfv_push_proto) ||
	     (nla_put_u8(skb, TCA_VLAN_PUSH_VLAN_PRIORITY,
			 p->tcfv_push_prio))))
		goto nla_put_failure;

	tcf_tm_dump(&t, &v->tcf_tm);
	if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&v->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&v->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_vlan_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static void tcf_vlan_stats_update(struct tc_action *a, u64 bytes, u64 packets,
				  u64 drops, u64 lastuse, bool hw)
{
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_t *tm = &v->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tcf_idr_search(tn, a, index);
}

static size_t tcf_vlan_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_vlan))
		+ nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_ID */
		+ nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_PROTOCOL */
		+ nla_total_size(sizeof(u8)); /* TCA_VLAN_PUSH_VLAN_PRIORITY */
}

static struct tc_action_ops act_vlan_ops = {
	.kind		= "vlan",
	.id		= TCA_ID_VLAN,
	.owner		= THIS_MODULE,
	.act		= tcf_vlan_act,
	.dump		= tcf_vlan_dump,
	.init		= tcf_vlan_init,
	.cleanup	= tcf_vlan_cleanup,
	.walk		= tcf_vlan_walker,
	.stats_update	= tcf_vlan_stats_update,
	.get_fill_size	= tcf_vlan_get_fill_size,
	.lookup		= tcf_vlan_search,
	.size		= sizeof(struct tcf_vlan),
};

static __net_init int vlan_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tc_action_net_init(net, tn, &act_vlan_ops);
}

static void __net_exit vlan_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, vlan_net_id);
}

static struct pernet_operations vlan_net_ops = {
	.init = vlan_init_net,
	.exit_batch = vlan_exit_net,
	.id   = &vlan_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init vlan_init_module(void)
{
	return tcf_register_action(&act_vlan_ops, &vlan_net_ops);
}

static void __exit vlan_cleanup_module(void)
{
	tcf_unregister_action(&act_vlan_ops, &vlan_net_ops);
}

module_init(vlan_init_module);
module_exit(vlan_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("vlan manipulation actions");
MODULE_LICENSE("GPL v2");
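
/*
 * Illustrative userspace configuration of this action via iproute2's tc(8).
 * The device name, classifier and VLAN values below are examples only, not
 * part of the module itself:
 *
 *	# tag all ingress traffic with an 802.1Q header
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress matchall \
 *		action vlan push id 100 protocol 802.1Q priority 3
 *
 *	# or strip the outermost VLAN tag instead
 *	tc filter add dev eth0 ingress matchall action vlan pop
 */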