// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_mirred.c	packet mirroring and redirect actions
 *
 * Authors:	Jamal Hadi Salim (2002-4)
 *
 * TODO: Add ingress support (and socket redirect support)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>

static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

#define MIRRED_RECURSION_LIMIT	4
static DEFINE_PER_CPU(unsigned int, mirred_rec_level);

static bool tcf_mirred_is_act_redirect(int action)
{
	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
}

static bool tcf_mirred_act_wants_ingress(int action)
{
	switch (action) {
	case TCA_EGRESS_REDIR:
	case TCA_EGRESS_MIRROR:
		return false;
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		return true;
	default:
		BUG();
	}
}

static bool tcf_mirred_can_reinsert(int action)
{
	switch (action) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		return true;
	}
	return false;
}

static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
{
	return rcu_dereference_protected(m->tcfm_dev,
					 lockdep_is_held(&m->tcf_lock));
}

static void tcf_mirred_release(struct tc_action *a)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	spin_lock(&mirred_list_lock);
	list_del(&m->tcfm_list);
	spin_unlock(&mirred_list_lock);

	/* last reference to action, no need to lock */
	dev = rcu_dereference_protected(m->tcfm_dev, 1);
	netdev_put(dev, &m->tcfm_dev_tracker);
}

static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
	[TCA_MIRRED_PARMS]	= { .len = sizeof(struct tc_mirred) },
};

static unsigned int mirred_net_id;
static struct tc_action_ops act_mirred_ops;
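/* Create a new mirred action or update an existing one: parse the
 * netlink attributes, validate the requested eaction, then (re)bind the
 * target device under tcf_lock so concurrent dumps and the datapath see
 * a consistent <device, eaction> pair.
 */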
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp,
			   u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_MIRRED_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	bool mac_header_xmit = false;
	struct tc_mirred *parm;
	struct tcf_mirred *m;
	bool exists = false;
	int ret, err;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
		return -EINVAL;
	}
	ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
					  mirred_policy, extack);
	if (ret < 0)
		return ret;
	if (!tb[TCA_MIRRED_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_MIRRED_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->eaction) {
	case TCA_EGRESS_MIRROR:
	case TCA_EGRESS_REDIR:
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
		return -EINVAL;
	}

	if (!exists) {
		if (!parm->ifindex) {
			tcf_idr_cleanup(tn, index);
			NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
			return -EINVAL;
		}
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_mirred_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	m = to_mirred(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&m->tcfm_list);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&m->tcf_lock);

	if (parm->ifindex) {
		struct net_device *odev, *ndev;

		ndev = dev_get_by_index(net, parm->ifindex);
		if (!ndev) {
			spin_unlock_bh(&m->tcf_lock);
			err = -ENODEV;
			goto put_chain;
		}
		mac_header_xmit = dev_is_mac_header_xmit(ndev);
		odev = rcu_replace_pointer(m->tcfm_dev, ndev,
					   lockdep_is_held(&m->tcf_lock));
		netdev_put(odev, &m->tcfm_dev_tracker);
		netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC);
		m->tcfm_mac_header_xmit = mac_header_xmit;
	}
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	m->tcfm_eaction = parm->eaction;
	spin_unlock_bh(&m->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (ret == ACT_P_CREATED) {
		spin_lock(&mirred_list_lock);
		list_add(&m->tcfm_list, &mirred_list);
		spin_unlock(&mirred_list_lock);
	}

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
{
	int err;

	if (!want_ingress)
		err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
	else
		err = netif_receive_skb(skb);

	return err;
}
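/* Per-packet fast path, running in the RCU-protected TC datapath. The
 * per-CPU mirred_rec_level counter bounds recursion when mirred actions
 * redirect packets to each other, and the clone is skipped entirely
 * when the caller can hand over the original skb (redirect at ingress).
 */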
static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_mirred *m = to_mirred(a);
	struct sk_buff *skb2 = skb;
	bool m_mac_header_xmit;
	struct net_device *dev;
	unsigned int rec_level;
	int retval, err = 0;
	bool use_reinsert;
	bool want_ingress;
	bool is_redirect;
	bool expects_nh;
	bool at_ingress;
	int m_eaction;
	int mac_len;
	bool at_nh;

	rec_level = __this_cpu_inc_return(mirred_rec_level);
	if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
				     netdev_name(skb->dev));
		__this_cpu_dec(mirred_rec_level);
		return TC_ACT_SHOT;
	}

	tcf_lastuse_update(&m->tcf_tm);
	tcf_action_update_bstats(&m->common, skb);

	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
	m_eaction = READ_ONCE(m->tcfm_eaction);
	retval = READ_ONCE(m->tcf_action);
	dev = rcu_dereference_bh(m->tcfm_dev);
	if (unlikely(!dev)) {
		pr_notice_once("tc mirred: target device is gone\n");
		goto out;
	}

	if (unlikely(!(dev->flags & IFF_UP))) {
		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
				       dev->name);
		goto out;
	}

	/* we could easily avoid the clone only if called by ingress and clsact;
	 * since we can't easily detect the clsact caller, skip clone only for
	 * ingress - that covers the TC S/W datapath.
	 */
	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
	at_ingress = skb_at_tc_ingress(skb);
	use_reinsert = at_ingress && is_redirect &&
		       tcf_mirred_can_reinsert(retval);
	if (!use_reinsert) {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out;
	}

	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);

	/* All mirred/redirected skbs should clear previous ct info */
	nf_reset_ct(skb2);
	if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
		skb_dst_drop(skb2);

	expects_nh = want_ingress || !m_mac_header_xmit;
	at_nh = skb->data == skb_network_header(skb);
	if (at_nh != expects_nh) {
		mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
			  skb_network_header(skb) - skb_mac_header(skb);
		if (expects_nh) {
			/* target device/action expect data at nh */
			skb_pull_rcsum(skb2, mac_len);
		} else {
			/* target device/action expect data at mac */
			skb_push_rcsum(skb2, mac_len);
		}
	}

	skb2->skb_iif = skb->dev->ifindex;
	skb2->dev = dev;

	/* mirror is always swallowed */
	if (is_redirect) {
		skb_set_redirected(skb2, skb2->tc_at_ingress);

		/* let the caller reinsert the packet, if possible */
		if (use_reinsert) {
			res->ingress = want_ingress;
			err = tcf_mirred_forward(res->ingress, skb);
			if (err)
				tcf_action_inc_overlimit_qstats(&m->common);
			__this_cpu_dec(mirred_rec_level);
			return TC_ACT_CONSUMED;
		}
	}

	err = tcf_mirred_forward(want_ingress, skb2);
	if (err) {
out:
		tcf_action_inc_overlimit_qstats(&m->common);
		if (tcf_mirred_is_act_redirect(m_eaction))
			retval = TC_ACT_SHOT;
	}
	__this_cpu_dec(mirred_rec_level);

	return retval;
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_mirred *m = to_mirred(a);
	struct tcf_t *tm = &m->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			   int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_mirred *m = to_mirred(a);
	struct tc_mirred opt = {
		.index   = m->tcf_index,
		.refcnt  = refcount_read(&m->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
	};
	struct net_device *dev;
	struct tcf_t t;

	spin_lock_bh(&m->tcf_lock);
	opt.action = m->tcf_action;
	opt.eaction = m->tcfm_eaction;
	dev = tcf_mirred_dev_dereference(m);
	if (dev)
		opt.ifindex = dev->ifindex;

	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &m->tcf_tm);
	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&m->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&m->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_mirred_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_mirred_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tcf_idr_search(tn, a, index);
}
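/* On NETDEV_UNREGISTER, walk every mirred instance and drop its
 * reference to the vanishing device. Datapath readers use RCU, so
 * clearing tcfm_dev with RCU_INIT_POINTER under tcf_lock is enough.
 */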
static int mirred_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tcf_mirred *m;

	ASSERT_RTNL();
	if (event == NETDEV_UNREGISTER) {
		spin_lock(&mirred_list_lock);
		list_for_each_entry(m, &mirred_list, tcfm_list) {
			spin_lock_bh(&m->tcf_lock);
			if (tcf_mirred_dev_dereference(m) == dev) {
				netdev_put(dev, &m->tcfm_dev_tracker);
				/* Note : no rcu grace period necessary, as
				 * net_device are already rcu protected.
				 */
				RCU_INIT_POINTER(m->tcfm_dev, NULL);
			}
			spin_unlock_bh(&m->tcf_lock);
		}
		spin_unlock(&mirred_list_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
	.notifier_call = mirred_device_event,
};

static void tcf_mirred_dev_put(void *priv)
{
	struct net_device *dev = priv;

	dev_put(dev);
}

static struct net_device *
tcf_mirred_get_dev(const struct tc_action *a,
		   tc_action_priv_destructor *destructor)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(m->tcfm_dev);
	if (dev) {
		dev_hold(dev);
		*destructor = tcf_mirred_dev_put;
	}
	rcu_read_unlock();

	return dev;
}

static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_mirred));
}

static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->dev = act->ops->get_dev(act, &entry->destructor);
	if (!entry->dev)
		return;
	entry->destructor_priv = entry->dev;
}

static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data,
					u32 *index_inc, bool bind,
					struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported mirred offload");
			return -EOPNOTSUPP;
		}
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		if (is_tcf_mirred_egress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT;
		else if (is_tcf_mirred_egress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED;
		else if (is_tcf_mirred_ingress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT_INGRESS;
		else if (is_tcf_mirred_ingress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED_INGRESS;
		else
			return -EOPNOTSUPP;
	}

	return 0;
}
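/* For reference, a typical userspace configuration of this action via
 * the tc CLI (interface names are illustrative) might look like:
 *
 *	# mirror ingress traffic from eth0 to a monitoring port
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress matchall \
 *		action mirred egress mirror dev eth1
 *
 *	# redirect instead of mirroring
 *	tc filter add dev eth0 ingress matchall \
 *		action mirred egress redirect dev eth1
 */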
static struct tc_action_ops act_mirred_ops = {
	.kind		=	"mirred",
	.id		=	TCA_ID_MIRRED,
	.owner		=	THIS_MODULE,
	.act		=	tcf_mirred_act,
	.stats_update	=	tcf_stats_update,
	.dump		=	tcf_mirred_dump,
	.cleanup	=	tcf_mirred_release,
	.init		=	tcf_mirred_init,
	.walk		=	tcf_mirred_walker,
	.lookup		=	tcf_mirred_search,
	.get_fill_size	=	tcf_mirred_get_fill_size,
	.offload_act_setup =	tcf_mirred_offload_act_setup,
	.size		=	sizeof(struct tcf_mirred),
	.get_dev	=	tcf_mirred_get_dev,
};

static __net_init int mirred_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tc_action_net_init(net, tn, &act_mirred_ops);
}

static void __net_exit mirred_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, mirred_net_id);
}

static struct pernet_operations mirred_net_ops = {
	.init = mirred_init_net,
	.exit_batch = mirred_exit_net,
	.id   = &mirred_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim (2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

static int __init mirred_init_module(void)
{
	int err = register_netdevice_notifier(&mirred_device_notifier);

	if (err)
		return err;

	pr_info("Mirror/redirect action on\n");
	err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
	if (err)
		unregister_netdevice_notifier(&mirred_device_notifier);

	return err;
}

static void __exit mirred_cleanup_module(void)
{
	tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
	unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);