// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_ipt.c		iptables target interface
 *
 * TODO: Add other tables. For now we only support the ipv4 table targets
 *
 * Copyright:	Jamal Hadi Salim (2002-13)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_ipt.h>
#include <net/tc_act/tc_ipt.h>
#include <net/tc_wrapper.h>

#include <linux/netfilter_ipv4/ip_tables.h>


static struct tc_action_ops act_ipt_ops;
static struct tc_action_ops act_xt_ops;

static int ipt_init_target(struct net *net, struct xt_entry_target *t,
			   char *table, unsigned int hook)
{
	struct xt_tgchk_param par;
	struct xt_target *target;
	struct ipt_entry e = {};
	int ret = 0;

	target = xt_request_find_target(AF_INET, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target))
		return PTR_ERR(target);

	t->u.kernel.target = target;
	memset(&par, 0, sizeof(par));
	par.net = net;
	par.table = table;
	par.entryinfo = &e;
	par.target = target;
	par.targinfo = t->data;
	par.hook_mask = hook;
	par.family = NFPROTO_IPV4;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
	if (ret < 0) {
		module_put(t->u.kernel.target->me);
		return ret;
	}
	return 0;
}

static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
{
	struct xt_tgdtor_param par = {
		.target   = t->u.kernel.target,
		.targinfo = t->data,
		.family   = NFPROTO_IPV4,
		.net      = net,
	};
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}

static void tcf_ipt_release(struct tc_action *a)
{
	struct tcf_ipt *ipt = to_ipt(a);

	if (ipt->tcfi_t) {
		ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net);
		kfree(ipt->tcfi_t);
	}
	kfree(ipt->tcfi_tname);
}

static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
	[TCA_IPT_TABLE]	= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_IPT_HOOK]	= { .type = NLA_U32 },
	[TCA_IPT_INDEX]	= { .type = NLA_U32 },
	[TCA_IPT_TARG]	= { .len = sizeof(struct xt_entry_target) },
};

static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
			  struct nlattr *est, struct tc_action **a,
			  const struct tc_action_ops *ops,
			  struct tcf_proto *tp, u32 flags)
{
	struct tc_action_net *tn = net_generic(net, id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_IPT_MAX + 1];
	struct tcf_ipt *ipt;
	struct xt_entry_target *td, *t;
	char *tname;
	bool exists = false;
	int ret = 0, err;
	u32 hook = 0;
	u32 index = 0;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_IPT_MAX, nla, ipt_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_IPT_INDEX] != NULL)
		index = nla_get_u32(tb[TCA_IPT_INDEX]);

	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}

	td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
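
	/* The netlink attribute payload must be exactly as large as the
	 * size userspace recorded in the target header; anything else
	 * is a malformed blob and is rejected below.
	 */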
	if (nla_len(tb[TCA_IPT_TARG]) != td->u.target_size) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a, ops, bind,
				     false, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else {
		if (bind)	/* don't override defaults */
			return 0;

		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	hook = nla_get_u32(tb[TCA_IPT_HOOK]);

	err = -ENOMEM;
	tname = kmalloc(IFNAMSIZ, GFP_KERNEL);
	if (unlikely(!tname))
		goto err1;
	if (tb[TCA_IPT_TABLE] == NULL ||
	    nla_strscpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ)
		strcpy(tname, "mangle");

	t = kmemdup(td, td->u.target_size, GFP_KERNEL);
	if (unlikely(!t))
		goto err2;

	err = ipt_init_target(net, t, tname, hook);
	if (err < 0)
		goto err3;

	ipt = to_ipt(*a);

	spin_lock_bh(&ipt->tcf_lock);
	if (ret != ACT_P_CREATED) {
		ipt_destroy_target(ipt->tcfi_t, net);
		kfree(ipt->tcfi_tname);
		kfree(ipt->tcfi_t);
	}
	ipt->tcfi_tname = tname;
	ipt->tcfi_t = t;
	ipt->tcfi_hook = hook;
	spin_unlock_bh(&ipt->tcf_lock);
	return ret;

err3:
	kfree(t);
err2:
	kfree(tname);
err1:
	tcf_idr_release(*a, bind);
	return err;
}

static int tcf_ipt_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **a,
			struct tcf_proto *tp,
			u32 flags, struct netlink_ext_ack *extack)
{
	return __tcf_ipt_init(net, act_ipt_ops.net_id, nla, est,
			      a, &act_ipt_ops, tp, flags);
}

static int tcf_xt_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       struct tcf_proto *tp,
		       u32 flags, struct netlink_ext_ack *extack)
{
	return __tcf_ipt_init(net, act_xt_ops.net_id, nla, est,
			      a, &act_xt_ops, tp, flags);
}
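
/* Run the configured xtables target on the skb and map the netfilter
 * verdict onto a TC action code: NF_ACCEPT -> TC_ACT_OK, NF_DROP ->
 * TC_ACT_SHOT (counted as a drop), XT_CONTINUE -> TC_ACT_PIPE; any
 * other verdict is logged and treated as accept.
 */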
TC_INDIRECT_SCOPE int tcf_ipt_act(struct sk_buff *skb,
				  const struct tc_action *a,
				  struct tcf_result *res)
{
	int ret = 0, result = 0;
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_action_param par;
	struct nf_hook_state state = {
		.net	= dev_net(skb->dev),
		.in	= skb->dev,
		.hook	= ipt->tcfi_hook,
		.pf	= NFPROTO_IPV4,
	};

	if (skb_unclone(skb, GFP_ATOMIC))
		return TC_ACT_UNSPEC;

	spin_lock(&ipt->tcf_lock);

	tcf_lastuse_update(&ipt->tcf_tm);
	bstats_update(&ipt->tcf_bstats, skb);

	/* Strictly we have to worry about both the in and the out dev;
	 * deferred for now. Beware: this API changed from earlier
	 * kernels.
	 */
	par.state    = &state;
	par.target   = ipt->tcfi_t->u.kernel.target;
	par.targinfo = ipt->tcfi_t->data;
	ret = par.target->target(skb, &par);

	switch (ret) {
	case NF_ACCEPT:
		result = TC_ACT_OK;
		break;
	case NF_DROP:
		result = TC_ACT_SHOT;
		ipt->tcf_qstats.drops++;
		break;
	case XT_CONTINUE:
		result = TC_ACT_PIPE;
		break;
	default:
		net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
				       ret);
		result = TC_ACT_OK;
		break;
	}
	spin_unlock(&ipt->tcf_lock);
	return result;
}

static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_entry_target *t;
	struct tcf_t tm;
	struct tc_cnt c;

	/* For simple targets the kernel size equals the user size and
	 * the user name equals the target name; to be foolproof, do
	 * not assume this.
	 */

	spin_lock_bh(&ipt->tcf_lock);
	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
	if (unlikely(!t))
		goto nla_put_failure;

	c.bindcnt = atomic_read(&ipt->tcf_bindcnt) - bind;
	c.refcnt = refcount_read(&ipt->tcf_refcnt) - ref;
	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);

	if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
	    nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
	    nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
	    nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
	    nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
		goto nla_put_failure;

	tcf_tm_dump(&tm, &ipt->tcf_tm);
	if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&ipt->tcf_lock);
	kfree(t);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&ipt->tcf_lock);
	nlmsg_trim(skb, b);
	kfree(t);
	return -1;
}

static struct tc_action_ops act_ipt_ops = {
	.kind		= "ipt",
	.id		= TCA_ID_IPT,
	.owner		= THIS_MODULE,
	.act		= tcf_ipt_act,
	.dump		= tcf_ipt_dump,
	.cleanup	= tcf_ipt_release,
	.init		= tcf_ipt_init,
	.size		= sizeof(struct tcf_ipt),
};

static __net_init int ipt_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_ipt_ops.net_id);

	return tc_action_net_init(net, tn, &act_ipt_ops);
}

static void __net_exit ipt_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_ipt_ops.net_id);
}

static struct pernet_operations ipt_net_ops = {
	.init = ipt_init_net,
	.exit_batch = ipt_exit_net,
	.id   = &act_ipt_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

static struct tc_action_ops act_xt_ops = {
	.kind		= "xt",
	.id		= TCA_ID_XT,
	.owner		= THIS_MODULE,
	.act		= tcf_ipt_act,
	.dump		= tcf_ipt_dump,
	.cleanup	= tcf_ipt_release,
	.init		= tcf_xt_init,
	.size		= sizeof(struct tcf_ipt),
};

static __net_init int xt_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_xt_ops.net_id);

	return tc_action_net_init(net, tn, &act_xt_ops);
}

static void __net_exit xt_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_xt_ops.net_id);
}

static struct pernet_operations xt_net_ops = {
	.init = xt_init_net,
	.exit_batch = xt_exit_net,
	.id   = &act_xt_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim (2002-13)");
MODULE_DESCRIPTION("Iptables target actions");
MODULE_LICENSE("GPL");
MODULE_ALIAS("act_xt");

static int __init ipt_init_module(void)
{
	int ret1, ret2;

	ret1 = tcf_register_action(&act_xt_ops, &xt_net_ops);
	if (ret1 < 0)
		pr_err("Failed to load xt action\n");

	ret2 = tcf_register_action(&act_ipt_ops, &ipt_net_ops);
	if (ret2 < 0)
		pr_err("Failed to load ipt action\n");

	/* Module load succeeds if at least one of the two aliases
	 * ("xt" and "ipt") registered.
	 */
	if (ret1 < 0 && ret2 < 0)
		return ret1;
	return 0;
}

static void __exit ipt_cleanup_module(void)
{
	tcf_unregister_action(&act_ipt_ops, &ipt_net_ops);
	tcf_unregister_action(&act_xt_ops, &xt_net_ops);
}

module_init(ipt_init_module);
module_exit(ipt_cleanup_module);
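
/* Example userspace usage, illustrative only: exact syntax depends on
 * the iproute2 version and on which xtables targets are available.
 * Attach the xt action to an ingress u32 filter and run the MARK
 * target on every matching packet:
 *
 *   tc filter add dev eth0 parent ffff: protocol ip prio 10 u32 \
 *       match u32 0 0 flowid 1:1 \
 *       action xt -j MARK --set-mark 0x2
 *
 * The "ipt" action name is the legacy alias; per this file, both names
 * share the same act, dump, and cleanup callbacks.
 */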