/*
 * net/sched/act_ipt.c		iptables target interface
 *
 * TODO: Add other tables. For now we only support the ipv4 table targets
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright:	Jamal Hadi Salim (2002-13)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_ipt.h>
#include <net/tc_act/tc_ipt.h>

#include <linux/netfilter_ipv4/ip_tables.h>


static unsigned int ipt_net_id;
static struct tc_action_ops act_ipt_ops;

static unsigned int xt_net_id;
static struct tc_action_ops act_xt_ops;

static int ipt_init_target(struct net *net, struct xt_entry_target *t,
			   char *table, unsigned int hook)
{
	struct xt_tgchk_param par;
	struct xt_target *target;
	struct ipt_entry e = {};
	int ret = 0;

	target = xt_request_find_target(AF_INET, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target))
		return PTR_ERR(target);

	t->u.kernel.target = target;
	memset(&par, 0, sizeof(par));
	par.net	      = net;
	par.table     = table;
	par.entryinfo = &e;
	par.target    = target;
	par.targinfo  = t->data;
	par.hook_mask = hook;
	par.family    = NFPROTO_IPV4;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
	if (ret < 0) {
		module_put(t->u.kernel.target->me);
		return ret;
	}
	return 0;
}

static void ipt_destroy_target(struct xt_entry_target *t)
{
	struct xt_tgdtor_param par = {
		.target   = t->u.kernel.target,
		.targinfo = t->data,
		.family   = NFPROTO_IPV4,
	};
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}

static void tcf_ipt_release(struct tc_action *a)
{
	struct tcf_ipt *ipt = to_ipt(a);

	if (ipt->tcfi_t) {
		ipt_destroy_target(ipt->tcfi_t);
		kfree(ipt->tcfi_t);
	}
	kfree(ipt->tcfi_tname);
}

static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
	[TCA_IPT_TABLE]	= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_IPT_HOOK]	= { .type = NLA_U32 },
	[TCA_IPT_INDEX]	= { .type = NLA_U32 },
	[TCA_IPT_TARG]	= { .len = sizeof(struct xt_entry_target) },
};

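/* Common setup path behind both the "ipt" and "xt" actions: parse and
 * validate the netlink attributes, create a new action instance or
 * override an existing one, duplicate the userspace target blob, and
 * bind it to the requested table and hook via ipt_init_target().
 */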
static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
			  struct nlattr *est, struct tc_action **a,
			  const struct tc_action_ops *ops, int ovr, int bind)
{
	struct tc_action_net *tn = net_generic(net, id);
	struct nlattr *tb[TCA_IPT_MAX + 1];
	struct tcf_ipt *ipt;
	struct xt_entry_target *td, *t;
	char *tname;
	bool exists = false;
	int ret = 0, err;
	u32 hook = 0;
	u32 index = 0;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_IPT_MAX, nla, ipt_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_IPT_INDEX] != NULL)
		index = nla_get_u32(tb[TCA_IPT_INDEX]);

	exists = tcf_idr_check(tn, index, a, bind);
	if (exists && bind)
		return 0;

	if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) {
		if (exists)
			tcf_idr_release(*a, bind);
		return -EINVAL;
	}

	td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
	if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
		if (exists)
			tcf_idr_release(*a, bind);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a, ops, bind, false);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	} else {
		if (bind)	/* don't override defaults */
			return 0;
		tcf_idr_release(*a, bind);

		if (!ovr)
			return -EEXIST;
	}
	hook = nla_get_u32(tb[TCA_IPT_HOOK]);

	err = -ENOMEM;
	tname = kmalloc(IFNAMSIZ, GFP_KERNEL);
	if (unlikely(!tname))
		goto err1;
	if (tb[TCA_IPT_TABLE] == NULL ||
	    nla_strlcpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ)
		strcpy(tname, "mangle");

	t = kmemdup(td, td->u.target_size, GFP_KERNEL);
	if (unlikely(!t))
		goto err2;

	err = ipt_init_target(net, t, tname, hook);
	if (err < 0)
		goto err3;

	ipt = to_ipt(*a);

	spin_lock_bh(&ipt->tcf_lock);
	if (ret != ACT_P_CREATED) {
		ipt_destroy_target(ipt->tcfi_t);
		kfree(ipt->tcfi_tname);
		kfree(ipt->tcfi_t);
	}
	ipt->tcfi_tname = tname;
	ipt->tcfi_t = t;
	ipt->tcfi_hook = hook;
	spin_unlock_bh(&ipt->tcf_lock);
	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;

err3:
	kfree(t);
err2:
	kfree(tname);
err1:
	if (ret == ACT_P_CREATED)
		tcf_idr_release(*a, bind);
	return err;
}

static int tcf_ipt_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **a, int ovr,
			int bind, bool rtnl_held,
			struct netlink_ext_ack *extack)
{
	return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
			      bind);
}

static int tcf_xt_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a, int ovr,
		       int bind, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
			      bind);
}

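/* Packet path: run the configured xt target on the skb and map the
 * netfilter verdict to a TC action code (NF_ACCEPT -> TC_ACT_OK,
 * NF_DROP -> TC_ACT_SHOT, XT_CONTINUE -> TC_ACT_PIPE); any other
 * verdict is treated as ACCEPT with a ratelimited notice.
 */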
static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
		   struct tcf_result *res)
{
	int ret = 0, result = 0;
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_action_param par;
	struct nf_hook_state state = {
		.net	= dev_net(skb->dev),
		.in	= skb->dev,
		.hook	= ipt->tcfi_hook,
		.pf	= NFPROTO_IPV4,
	};

	if (skb_unclone(skb, GFP_ATOMIC))
		return TC_ACT_UNSPEC;

	spin_lock(&ipt->tcf_lock);

	tcf_lastuse_update(&ipt->tcf_tm);
	bstats_update(&ipt->tcf_bstats, skb);

	/* yes, we have to worry about both in and out dev;
	 * worry about it later - danger - this API seems to have
	 * changed from earlier kernels
	 */
	par.state    = &state;
	par.target   = ipt->tcfi_t->u.kernel.target;
	par.targinfo = ipt->tcfi_t->data;
	ret = par.target->target(skb, &par);

	switch (ret) {
	case NF_ACCEPT:
		result = TC_ACT_OK;
		break;
	case NF_DROP:
		result = TC_ACT_SHOT;
		ipt->tcf_qstats.drops++;
		break;
	case XT_CONTINUE:
		result = TC_ACT_PIPE;
		break;
	default:
		net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
				       ret);
		result = TC_ACT_OK;
		break;
	}
	spin_unlock(&ipt->tcf_lock);
	return result;
}

static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_entry_target *t;
	struct tcf_t tm;
	struct tc_cnt c;

	/* for simple targets, kernel size == user size and
	 * user name == target name; to be foolproof, do not
	 * assume this
	 */
	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
	if (unlikely(!t))
		goto nla_put_failure;

	c.bindcnt = atomic_read(&ipt->tcf_bindcnt) - bind;
	c.refcnt = refcount_read(&ipt->tcf_refcnt) - ref;
	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);

	if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
	    nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
	    nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
	    nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
	    nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
		goto nla_put_failure;

	tcf_tm_dump(&tm, &ipt->tcf_tm);
	if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
		goto nla_put_failure;

	kfree(t);
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	kfree(t);
	return -1;
}

static int tcf_ipt_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index,
			  struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tcf_idr_search(tn, a, index);
}

static int tcf_ipt_delete(struct net *net, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tcf_idr_delete_index(tn, index);
}

static struct tc_action_ops act_ipt_ops = {
	.kind		= "ipt",
	.type		= TCA_ACT_IPT,
	.owner		= THIS_MODULE,
	.act		= tcf_ipt,
	.dump		= tcf_ipt_dump,
	.cleanup	= tcf_ipt_release,
	.init		= tcf_ipt_init,
	.walk		= tcf_ipt_walker,
	.lookup		= tcf_ipt_search,
	.delete		= tcf_ipt_delete,
	.size		= sizeof(struct tcf_ipt),
};

static __net_init int ipt_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tc_action_net_init(tn, &act_ipt_ops);
}

static void __net_exit ipt_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, ipt_net_id);
}

static struct pernet_operations ipt_net_ops = {
	.init = ipt_init_net,
	.exit_batch = ipt_exit_net,
	.id   = &ipt_net_id,
	.size = sizeof(struct tc_action_net),
};

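/* The "xt" action is functionally identical to "ipt": it shares the
 * same act, dump and cleanup callbacks, and differs only in its action
 * kind and per-netns state.
 */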
static int tcf_xt_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tcf_idr_search(tn, a, index);
}

static int tcf_xt_delete(struct net *net, u32 index)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tcf_idr_delete_index(tn, index);
}

static struct tc_action_ops act_xt_ops = {
	.kind		= "xt",
	.type		= TCA_ACT_XT,
	.owner		= THIS_MODULE,
	.act		= tcf_ipt,
	.dump		= tcf_ipt_dump,
	.cleanup	= tcf_ipt_release,
	.init		= tcf_xt_init,
	.walk		= tcf_xt_walker,
	.lookup		= tcf_xt_search,
	.delete		= tcf_xt_delete,
	.size		= sizeof(struct tcf_ipt),
};

static __net_init int xt_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tc_action_net_init(tn, &act_xt_ops);
}

static void __net_exit xt_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, xt_net_id);
}

static struct pernet_operations xt_net_ops = {
	.init = xt_init_net,
	.exit_batch = xt_exit_net,
	.id   = &xt_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim (2002-13)");
MODULE_DESCRIPTION("Iptables target actions");
MODULE_LICENSE("GPL");
MODULE_ALIAS("act_xt");

static int __init ipt_init_module(void)
{
	int ret1, ret2;

	ret1 = tcf_register_action(&act_xt_ops, &xt_net_ops);
	if (ret1 < 0)
		pr_err("Failed to load xt action\n");

	ret2 = tcf_register_action(&act_ipt_ops, &ipt_net_ops);
	if (ret2 < 0)
		pr_err("Failed to load ipt action\n");

	if (ret1 < 0 && ret2 < 0)
		return ret1;
	else
		return 0;
}

static void __exit ipt_cleanup_module(void)
{
	tcf_unregister_action(&act_ipt_ops, &ipt_net_ops);
	tcf_unregister_action(&act_xt_ops, &xt_net_ops);
}

module_init(ipt_init_module);
module_exit(ipt_cleanup_module);

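/* Illustrative userspace usage (a sketch only; it assumes an iproute2
 * build with xtables action support, and the exact syntax differs
 * between iproute2 versions):
 *
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 10 \
 *	u32 match ip dst 192.0.2.1/32 \
 *	action ipt -j DROP
 */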