/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

#include <linux/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_skbedit.h>

#define SKBEDIT_TAB_MASK	15

static unsigned int skbedit_net_id;
static struct tc_action_ops act_skbedit_ops;

/* Per-packet action: apply the configured priority, queue mapping, mark
 * and/or packet type edits to the skb.
 */
static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
		       struct tcf_result *res)
{
	struct tcf_skbedit *d = to_skbedit(a);

	spin_lock(&d->tcf_lock);
	tcf_lastuse_update(&d->tcf_tm);
	bstats_update(&d->tcf_bstats, skb);

	if (d->flags & SKBEDIT_F_PRIORITY)
		skb->priority = d->priority;
	if (d->flags & SKBEDIT_F_QUEUE_MAPPING &&
	    skb->dev->real_num_tx_queues > d->queue_mapping)
		skb_set_queue_mapping(skb, d->queue_mapping);
	if (d->flags & SKBEDIT_F_MARK) {
		skb->mark &= ~d->mask;
		skb->mark |= d->mark & d->mask;
	}
	if (d->flags & SKBEDIT_F_PTYPE)
		skb->pkt_type = d->ptype;

	spin_unlock(&d->tcf_lock);
	return d->tcf_action;
}

static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
	[TCA_SKBEDIT_PARMS]		= { .len = sizeof(struct tc_skbedit) },
	[TCA_SKBEDIT_PRIORITY]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_QUEUE_MAPPING]	= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MARK]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_PTYPE]		= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MASK]		= { .len = sizeof(u32) },
};

/* Parse the netlink attributes and create or update an skbedit instance. */
static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
			    struct nlattr *est, struct tc_action **a,
			    int ovr, int bind)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);
	struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
	struct tc_skbedit *parm;
	struct tcf_skbedit *d;
	u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
	u16 *queue_mapping = NULL, *ptype = NULL;
	bool exists = false;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_SKBEDIT_MAX, nla, skbedit_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_SKBEDIT_PARMS] == NULL)
		return -EINVAL;

	if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
		flags |= SKBEDIT_F_PRIORITY;
		priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
	}

	if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
		flags |= SKBEDIT_F_QUEUE_MAPPING;
		queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
	}

	if (tb[TCA_SKBEDIT_PTYPE] != NULL) {
		ptype = nla_data(tb[TCA_SKBEDIT_PTYPE]);
		if (!skb_pkt_type_ok(*ptype))
			return -EINVAL;
		flags |= SKBEDIT_F_PTYPE;
	}

	if (tb[TCA_SKBEDIT_MARK] != NULL) {
		flags |= SKBEDIT_F_MARK;
		mark = nla_data(tb[TCA_SKBEDIT_MARK]);
	}

	if (tb[TCA_SKBEDIT_MASK] != NULL) {
		flags |= SKBEDIT_F_MASK;
		mask = nla_data(tb[TCA_SKBEDIT_MASK]);
	}

	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);

	exists = tcf_hash_check(tn, parm->index, a, bind);
	if (exists && bind)
		return 0;

	if (!flags) {
		tcf_hash_release(*a, bind);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_hash_create(tn, parm->index, est, a,
				      &act_skbedit_ops, bind, false);
		if (ret)
			return ret;

		d = to_skbedit(*a);
		ret = ACT_P_CREATED;
	} else {
		d = to_skbedit(*a);
		tcf_hash_release(*a, bind);
		if (!ovr)
			return -EEXIST;
	}

	spin_lock_bh(&d->tcf_lock);

	d->flags = flags;
	if (flags & SKBEDIT_F_PRIORITY)
		d->priority = *priority;
	if (flags & SKBEDIT_F_QUEUE_MAPPING)
		d->queue_mapping = *queue_mapping;
	if (flags & SKBEDIT_F_MARK)
		d->mark = *mark;
	if (flags & SKBEDIT_F_PTYPE)
		d->ptype = *ptype;
	/* default behaviour is to use all the bits */
	d->mask = 0xffffffff;
	if (flags & SKBEDIT_F_MASK)
		d->mask = *mask;

	d->tcf_action = parm->action;

	spin_unlock_bh(&d->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(tn, *a);
	return ret;
}

/* Dump the current configuration back to userspace via netlink. */
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
			    int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_skbedit *d = to_skbedit(a);
	struct tc_skbedit opt = {
		.index   = d->tcf_index,
		.refcnt  = d->tcf_refcnt - ref,
		.bindcnt = d->tcf_bindcnt - bind,
		.action  = d->tcf_action,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_PRIORITY) &&
	    nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, d->priority))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) &&
	    nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, d->queue_mapping))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_MARK) &&
	    nla_put_u32(skb, TCA_SKBEDIT_MARK, d->mark))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_PTYPE) &&
	    nla_put_u16(skb, TCA_SKBEDIT_PTYPE, d->ptype))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_MASK) &&
	    nla_put_u32(skb, TCA_SKBEDIT_MASK, d->mask))
		goto nla_put_failure;

	tcf_tm_dump(&t, &d->tcf_tm);
	if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb,
			      struct netlink_callback *cb, int type,
			      const struct tc_action_ops *ops)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tcf_hash_search(tn, a, index);
}

static struct tc_action_ops act_skbedit_ops = {
	.kind	=	"skbedit",
	.type	=	TCA_ACT_SKBEDIT,
	.owner	=	THIS_MODULE,
	.act	=	tcf_skbedit,
	.dump	=	tcf_skbedit_dump,
	.init	=	tcf_skbedit_init,
	.walk	=	tcf_skbedit_walker,
	.lookup	=	tcf_skbedit_search,
	.size	=	sizeof(struct tcf_skbedit),
};

static __net_init int skbedit_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tc_action_net_init(tn, &act_skbedit_ops, SKBEDIT_TAB_MASK);
}

static void __net_exit skbedit_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations skbedit_net_ops = {
	.init = skbedit_init_net,
	.exit = skbedit_exit_net,
	.id   = &skbedit_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
MODULE_DESCRIPTION("SKB Editing");
MODULE_LICENSE("GPL");

static int __init skbedit_init_module(void)
{
	return tcf_register_action(&act_skbedit_ops, &skbedit_net_ops);
}

static void __exit skbedit_cleanup_module(void)
{
	tcf_unregister_action(&act_skbedit_ops, &skbedit_net_ops);
}

module_init(skbedit_init_module);
module_exit(skbedit_cleanup_module);
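
/*
 * Illustrative usage sketch (kept as a comment so this remains valid C; it is
 * not part of the upstream file). The skbedit action above is normally
 * configured from userspace with tc(8); the device name, handles and match
 * below are placeholder values, and tc-skbedit(8) is the authoritative
 * reference for the syntax:
 *
 *   tc qdisc add dev eth0 handle 1: root multiq
 *   tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *           match ip dst 192.168.0.3 \
 *           action skbedit queue_mapping 3
 *
 * Each keyword corresponds to one of the netlink attributes parsed in
 * tcf_skbedit_init(): queue_mapping -> TCA_SKBEDIT_QUEUE_MAPPING,
 * priority -> TCA_SKBEDIT_PRIORITY, mark MARK[/MASK] -> TCA_SKBEDIT_MARK
 * (and TCA_SKBEDIT_MASK), ptype -> TCA_SKBEDIT_PTYPE.
 */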