/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/dsfield.h>

#include <linux/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_skbedit.h>

static unsigned int skbedit_net_id;
static struct tc_action_ops act_skbedit_ops;

static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
			   struct tcf_result *res)
{
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_skbedit_params *params;
	int action;

	tcf_lastuse_update(&d->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);

	params = rcu_dereference_bh(d->params);
	action = READ_ONCE(d->tcf_action);

	if (params->flags & SKBEDIT_F_PRIORITY)
		skb->priority = params->priority;
	if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
		int wlen = skb_network_offset(skb);

		switch (tc_skb_protocol(skb)) {
		case htons(ETH_P_IP):
			wlen += sizeof(struct iphdr);
			if (!pskb_may_pull(skb, wlen))
				goto err;
			skb->priority = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
			break;

		case htons(ETH_P_IPV6):
			wlen += sizeof(struct ipv6hdr);
			if (!pskb_may_pull(skb, wlen))
				goto err;
			skb->priority = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
			break;
		}
	}
	if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
	    skb->dev->real_num_tx_queues > params->queue_mapping)
		skb_set_queue_mapping(skb, params->queue_mapping);
	if (params->flags & SKBEDIT_F_MARK) {
		skb->mark &= ~params->mask;
		skb->mark |= params->mark & params->mask;
	}
	if (params->flags & SKBEDIT_F_PTYPE)
		skb->pkt_type = params->ptype;
	return action;

err:
	qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats));
	return TC_ACT_SHOT;
}

static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
	[TCA_SKBEDIT_PARMS]		= { .len = sizeof(struct tc_skbedit) },
	[TCA_SKBEDIT_PRIORITY]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_QUEUE_MAPPING]	= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MARK]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_PTYPE]		= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MASK]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_FLAGS]		= { .len = sizeof(u64) },
};

static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
			    struct nlattr *est, struct tc_action **a,
			    int ovr, int bind, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);
	struct tcf_skbedit_params *params_new;
	struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
	struct tc_skbedit *parm;
	struct tcf_skbedit *d;
	u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
	u16 *queue_mapping = NULL, *ptype = NULL;
	bool exists = false;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_SKBEDIT_MAX, nla, skbedit_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_SKBEDIT_PARMS] == NULL)
		return -EINVAL;

	if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
		flags |= SKBEDIT_F_PRIORITY;
		priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
	}

	if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
		flags |= SKBEDIT_F_QUEUE_MAPPING;
		queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
	}

	if (tb[TCA_SKBEDIT_PTYPE] != NULL) {
		ptype = nla_data(tb[TCA_SKBEDIT_PTYPE]);
		if (!skb_pkt_type_ok(*ptype))
			return -EINVAL;
		flags |= SKBEDIT_F_PTYPE;
	}

	if (tb[TCA_SKBEDIT_MARK] != NULL) {
		flags |= SKBEDIT_F_MARK;
		mark = nla_data(tb[TCA_SKBEDIT_MARK]);
	}

	if (tb[TCA_SKBEDIT_MASK] != NULL) {
		flags |= SKBEDIT_F_MASK;
		mask = nla_data(tb[TCA_SKBEDIT_MASK]);
	}

	if (tb[TCA_SKBEDIT_FLAGS] != NULL) {
		u64 *pure_flags = nla_data(tb[TCA_SKBEDIT_FLAGS]);

		if (*pure_flags & SKBEDIT_F_INHERITDSFIELD)
			flags |= SKBEDIT_F_INHERITDSFIELD;
	}

	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);

	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	if (!flags) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, parm->index);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_skbedit_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, parm->index);
			return ret;
		}

		d = to_skbedit(*a);
		ret = ACT_P_CREATED;
	} else {
		d = to_skbedit(*a);
		if (!ovr) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		if (ret == ACT_P_CREATED)
			tcf_idr_release(*a, bind);
		return -ENOMEM;
	}

	params_new->flags = flags;
	if (flags & SKBEDIT_F_PRIORITY)
		params_new->priority = *priority;
	if (flags & SKBEDIT_F_QUEUE_MAPPING)
		params_new->queue_mapping = *queue_mapping;
	if (flags & SKBEDIT_F_MARK)
		params_new->mark = *mark;
	if (flags & SKBEDIT_F_PTYPE)
		params_new->ptype = *ptype;
	/* default behaviour is to use all the bits */
	params_new->mask = 0xffffffff;
	if (flags & SKBEDIT_F_MASK)
		params_new->mask = *mask;

	spin_lock_bh(&d->tcf_lock);
	d->tcf_action = parm->action;
	rcu_swap_protected(d->params, params_new,
			   lockdep_is_held(&d->tcf_lock));
	spin_unlock_bh(&d->tcf_lock);
	if (params_new)
		kfree_rcu(params_new, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;
}

static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
			    int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_skbedit_params *params;
	struct tc_skbedit opt = {
		.index   = d->tcf_index,
		.refcnt  = refcount_read(&d->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
	};
	u64 pure_flags = 0;
	struct tcf_t t;

	spin_lock_bh(&d->tcf_lock);
	params = rcu_dereference_protected(d->params,
					   lockdep_is_held(&d->tcf_lock));
	opt.action = d->tcf_action;

	if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_PRIORITY) &&
	    nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, params->priority))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_QUEUE_MAPPING) &&
	    nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, params->queue_mapping))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_MARK) &&
	    nla_put_u32(skb, TCA_SKBEDIT_MARK, params->mark))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_PTYPE) &&
	    nla_put_u16(skb, TCA_SKBEDIT_PTYPE, params->ptype))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_MASK) &&
	    nla_put_u32(skb, TCA_SKBEDIT_MASK, params->mask))
		goto nla_put_failure;
	if (params->flags & SKBEDIT_F_INHERITDSFIELD)
		pure_flags |= SKBEDIT_F_INHERITDSFIELD;
	if (pure_flags != 0 &&
	    nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags))
		goto nla_put_failure;

	tcf_tm_dump(&t, &d->tcf_tm);
	if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&d->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&d->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_skbedit_cleanup(struct tc_action *a)
{
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_skbedit_params *params;

	params = rcu_dereference_protected(d->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}

static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb,
			      struct netlink_callback *cb, int type,
			      const struct tc_action_ops *ops,
			      struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_skbedit_ops = {
	.kind		= "skbedit",
	.type		= TCA_ACT_SKBEDIT,
	.owner		= THIS_MODULE,
	.act		= tcf_skbedit_act,
	.dump		= tcf_skbedit_dump,
	.init		= tcf_skbedit_init,
	.cleanup	= tcf_skbedit_cleanup,
	.walk		= tcf_skbedit_walker,
	.lookup		= tcf_skbedit_search,
	.size		= sizeof(struct tcf_skbedit),
};

static __net_init int skbedit_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tc_action_net_init(tn, &act_skbedit_ops);
}

static void __net_exit skbedit_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, skbedit_net_id);
}

static struct pernet_operations skbedit_net_ops = {
	.init = skbedit_init_net,
	.exit_batch = skbedit_exit_net,
	.id   = &skbedit_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
MODULE_DESCRIPTION("SKB Editing");
MODULE_LICENSE("GPL");

static int __init skbedit_init_module(void)
{
	return tcf_register_action(&act_skbedit_ops, &skbedit_net_ops);
}

static void __exit skbedit_cleanup_module(void)
{
	tcf_unregister_action(&act_skbedit_ops, &skbedit_net_ops);
}

module_init(skbedit_init_module);
module_exit(skbedit_cleanup_module);