// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright 2020 NXP */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gate.h>

static struct tc_action_ops act_gate_ops;

/* Return the current time on the clock the schedule runs on. TK_OFFS_MAX
 * is used as a sentinel for CLOCK_MONOTONIC, which needs no conversion.
 */
static ktime_t gate_get_time(struct tcf_gate *gact)
{
	ktime_t mono = ktime_get();

	switch (gact->tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, gact->tk_offset);
	}

	return KTIME_MAX;
}

/* Compute when the schedule should (re)start: the base time itself if it
 * lies in the future, otherwise the next cycle boundary after "now".
 */
static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
{
	struct tcf_gate_params *param = &gact->param;
	ktime_t now, base, cycle;
	u64 n;

	base = ns_to_ktime(param->tcfg_basetime);
	now = gate_get_time(gact);

	if (ktime_after(base, now)) {
		*start = base;
		return;
	}

	cycle = param->tcfg_cycletime;

	n = div64_u64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
}

static void gate_start_timer(struct tcf_gate *gact, ktime_t start)
{
	ktime_t expires;

	expires = hrtimer_get_expires(&gact->hitimer);
	if (expires == 0)
		expires = KTIME_MAX;

	/* Never push an already-armed timer further into the future. */
	start = min_t(ktime_t, start, expires);

	hrtimer_start(&gact->hitimer, start, HRTIMER_MODE_ABS_SOFT);
}

/* Timer callback: advance the schedule to the next gate entry and re-arm
 * the timer for that entry's close time.
 */
static enum hrtimer_restart gate_timer_func(struct hrtimer *timer)
{
	struct tcf_gate *gact = container_of(timer, struct tcf_gate,
					     hitimer);
	struct tcf_gate_params *p = &gact->param;
	struct tcfg_gate_entry *next;
	ktime_t close_time, now;

	spin_lock(&gact->tcf_lock);

	next = gact->next_entry;

	/* cycle start, clear pending bit, clear total octets */
	gact->current_gate_status = next->gate_state ?
				    GATE_ACT_GATE_OPEN : 0;
	gact->current_entry_octets = 0;
	gact->current_max_octets = next->maxoctets;

	gact->current_close_time = ktime_add_ns(gact->current_close_time,
						next->interval);

	close_time = gact->current_close_time;

	if (list_is_last(&next->list, &p->entries))
		next = list_first_entry(&p->entries,
					struct tcfg_gate_entry, list);
	else
		next = list_next_entry(next, list);

	now = gate_get_time(gact);

	/* If the schedule has fallen behind the clock, jump ahead to the
	 * start of the next full cycle.
	 */
	if (ktime_after(now, close_time)) {
		ktime_t cycle, base;
		u64 n;

		cycle = p->tcfg_cycletime;
		base = ns_to_ktime(p->tcfg_basetime);
		n = div64_u64(ktime_sub_ns(now, base), cycle);
		close_time = ktime_add_ns(base, (n + 1) * cycle);
	}

	gact->next_entry = next;

	hrtimer_set_expires(&gact->hitimer, close_time);

	spin_unlock(&gact->tcf_lock);

	return HRTIMER_RESTART;
}

/* Datapath: pass the packet while the gate is open (subject to the
 * per-entry octet budget), drop it otherwise.
 */
static int tcf_gate_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_gate *gact = to_gate(a);

	spin_lock(&gact->tcf_lock);

	tcf_lastuse_update(&gact->tcf_tm);
	bstats_update(&gact->tcf_bstats, skb);

	/* Until the schedule's start time is reached, pass everything. */
	if (unlikely(gact->current_gate_status & GATE_ACT_PENDING)) {
		spin_unlock(&gact->tcf_lock);
		return gact->tcf_action;
	}

	if (!(gact->current_gate_status & GATE_ACT_GATE_OPEN))
		goto drop;

	if (gact->current_max_octets >= 0) {
		gact->current_entry_octets += qdisc_pkt_len(skb);
		if (gact->current_entry_octets > gact->current_max_octets) {
			gact->tcf_qstats.overlimits++;
			goto drop;
		}
	}

	spin_unlock(&gact->tcf_lock);

	return gact->tcf_action;
drop:
	gact->tcf_qstats.drops++;
	spin_unlock(&gact->tcf_lock);

	return TC_ACT_SHOT;
}

static const struct nla_policy entry_policy[TCA_GATE_ENTRY_MAX + 1] = {
	[TCA_GATE_ENTRY_INDEX]		= { .type = NLA_U32 },
	[TCA_GATE_ENTRY_GATE]		= { .type = NLA_FLAG },
	[TCA_GATE_ENTRY_INTERVAL]	= { .type = NLA_U32 },
	[TCA_GATE_ENTRY_IPV]		= { .type = NLA_S32 },
	[TCA_GATE_ENTRY_MAX_OCTETS]	= { .type = NLA_S32 },
};

static const struct nla_policy gate_policy[TCA_GATE_MAX + 1] = {
	[TCA_GATE_PARMS]		=
		NLA_POLICY_EXACT_LEN(sizeof(struct tc_gate)),
	[TCA_GATE_PRIORITY]		= { .type = NLA_S32 },
	[TCA_GATE_ENTRY_LIST]		= { .type = NLA_NESTED },
	[TCA_GATE_BASE_TIME]		= { .type = NLA_U64 },
	[TCA_GATE_CYCLE_TIME]		= { .type = NLA_U64 },
	[TCA_GATE_CYCLE_TIME_EXT]	= { .type = NLA_U64 },
	[TCA_GATE_FLAGS]		= { .type = NLA_U32 },
	[TCA_GATE_CLOCKID]		= { .type = NLA_S32 },
};

static int fill_gate_entry(struct nlattr **tb, struct tcfg_gate_entry *entry,
			   struct netlink_ext_ack *extack)
{
	u32 interval = 0;

	entry->gate_state = nla_get_flag(tb[TCA_GATE_ENTRY_GATE]);

	if (tb[TCA_GATE_ENTRY_INTERVAL])
		interval = nla_get_u32(tb[TCA_GATE_ENTRY_INTERVAL]);

	if (interval == 0) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	if (tb[TCA_GATE_ENTRY_IPV])
		entry->ipv = nla_get_s32(tb[TCA_GATE_ENTRY_IPV]);
	else
		entry->ipv = -1;

	if (tb[TCA_GATE_ENTRY_MAX_OCTETS])
		entry->maxoctets = nla_get_s32(tb[TCA_GATE_ENTRY_MAX_OCTETS]);
	else
		entry->maxoctets = -1;

	return 0;
}

static int parse_gate_entry(struct nlattr *n, struct tcfg_gate_entry *entry,
			    int index, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GATE_ENTRY_MAX +
			  1] = { };
	int err;

	err = nla_parse_nested(tb, TCA_GATE_ENTRY_MAX, n, entry_policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_gate_entry(tb, entry, extack);
}

static void release_entry_list(struct list_head *entries)
{
	struct tcfg_gate_entry *entry, *e;

	list_for_each_entry_safe(entry, e, entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

static int parse_gate_list(struct nlattr *list_attr,
			   struct tcf_gate_params *sched,
			   struct netlink_ext_ack *extack)
{
	struct tcfg_gate_entry *entry;
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list_attr)
		return -EINVAL;

	nla_for_each_nested(n, list_attr, rem) {
		if (nla_type(n) != TCA_GATE_ONE_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute isn't type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			err = -ENOMEM;
			goto release_list;
		}

		err = parse_gate_entry(n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			goto release_list;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;

release_list:
	release_entry_list(&sched->entries);

	return err;
}

/* (Re)program base time, clock and the cycle timer. On a replace, the
 * timer is only torn down and re-initialized when one of those parameters
 * actually changed; tcf_lock must be dropped around hrtimer_cancel()
 * because the timer callback takes the same lock.
 */
static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
			     enum tk_offsets tko, s32 clockid,
			     bool do_init)
{
	if (!do_init) {
		if (basetime == gact->param.tcfg_basetime &&
		    tko == gact->tk_offset &&
		    clockid == gact->param.tcfg_clockid)
			return;

		spin_unlock_bh(&gact->tcf_lock);
		hrtimer_cancel(&gact->hitimer);
		spin_lock_bh(&gact->tcf_lock);
	}
	gact->param.tcfg_basetime = basetime;
	gact->param.tcfg_clockid = clockid;
	gact->tk_offset = tko;
	hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
	gact->hitimer.function = gate_timer_func;
}

static int tcf_gate_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 struct tcf_proto *tp, u32 flags,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id);
	enum tk_offsets tk_offset = TK_OFFS_TAI;
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_GATE_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	u64 cycletime = 0, basetime = 0;
	struct tcf_gate_params *p;
	s32 clockid = CLOCK_TAI;
	struct tcf_gate *gact;
	struct tc_gate *parm;
	int ret = 0, err;
	u32 gflags = 0;
	s32 prio = -1;
	ktime_t start;
	u32 index;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GATE_MAX, nla, gate_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GATE_PARMS])
		return -EINVAL;

	if (tb[TCA_GATE_CLOCKID]) {
		clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
		switch (clockid) {
		case CLOCK_REALTIME:
			tk_offset = TK_OFFS_REAL;
			break;
		case CLOCK_MONOTONIC:
			tk_offset = TK_OFFS_MAX;
			break;
		case CLOCK_BOOTTIME:
			tk_offset = TK_OFFS_BOOT;
			break;
		case CLOCK_TAI:
			tk_offset = TK_OFFS_TAI;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
			return -EINVAL;
		}
	}

	parm = nla_data(tb[TCA_GATE_PARMS]);
	index = parm->index;

	err = tcf_idr_check_alloc(tn,
				  &index, a, bind);
	if (err < 0)
		return err;

	if (err && bind)
		return 0;

	if (!err) {
		ret = tcf_idr_create(tn, index, est, a,
				     &act_gate_ops, bind, false, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	if (tb[TCA_GATE_PRIORITY])
		prio = nla_get_s32(tb[TCA_GATE_PRIORITY]);

	if (tb[TCA_GATE_BASE_TIME])
		basetime = nla_get_u64(tb[TCA_GATE_BASE_TIME]);

	if (tb[TCA_GATE_FLAGS])
		gflags = nla_get_u32(tb[TCA_GATE_FLAGS]);

	gact = to_gate(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&gact->param.entries);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&gact->tcf_lock);
	p = &gact->param;

	if (tb[TCA_GATE_CYCLE_TIME])
		cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);

	if (tb[TCA_GATE_ENTRY_LIST]) {
		err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
		if (err < 0)
			goto chain_put;
	}

	/* Without an explicit cycle time, fall back to the sum of the
	 * entry intervals; an empty schedule is rejected.
	 */
	if (!cycletime) {
		struct tcfg_gate_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &p->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);
		cycletime = cycle;
		if (!cycletime) {
			err = -EINVAL;
			goto chain_put;
		}
	}
	p->tcfg_cycletime = cycletime;

	if (tb[TCA_GATE_CYCLE_TIME_EXT])
		p->tcfg_cycletime_ext =
			nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);

	gate_setup_timer(gact, basetime, tk_offset, clockid,
			 ret == ACT_P_CREATED);
	p->tcfg_priority = prio;
	p->tcfg_flags = gflags;
	gate_get_start_time(gact, &start);

	gact->current_close_time = start;
	gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;

	gact->next_entry = list_first_entry(&p->entries,
					    struct tcfg_gate_entry, list);

	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);

	gate_start_timer(gact, start);

	spin_unlock_bh(&gact->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	return ret;

chain_put:
	spin_unlock_bh(&gact->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	/* action is not inserted in any list: it's safe to init hitimer
	 * without taking tcf_lock.
	 */
	if (ret == ACT_P_CREATED)
		gate_setup_timer(gact, gact->param.tcfg_basetime,
				 gact->tk_offset, gact->param.tcfg_clockid,
				 true);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_gate_cleanup(struct tc_action *a)
{
	struct tcf_gate *gact = to_gate(a);
	struct tcf_gate_params *p;

	p = &gact->param;
	hrtimer_cancel(&gact->hitimer);
	release_entry_list(&p->entries);
}

static int dumping_entry(struct sk_buff *skb,
			 struct tcfg_gate_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(skb, TCA_GATE_ONE_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(skb, TCA_GATE_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (entry->gate_state && nla_put_flag(skb, TCA_GATE_ENTRY_GATE))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GATE_ENTRY_INTERVAL, entry->interval))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_ENTRY_MAX_OCTETS, entry->maxoctets))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_ENTRY_IPV, entry->ipv))
		goto nla_put_failure;

	return nla_nest_end(skb, item);

nla_put_failure:
	nla_nest_cancel(skb, item);
	return -1;
}

static int tcf_gate_dump(struct sk_buff *skb, struct tc_action *a,
			 int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_gate *gact = to_gate(a);
	struct tc_gate opt = {
		.index = gact->tcf_index,
		.refcnt = refcount_read(&gact->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&gact->tcf_bindcnt) - bind,
	};
	struct tcfg_gate_entry *entry;
	struct tcf_gate_params *p;
	struct nlattr *entry_list;
	struct tcf_t t;

	spin_lock_bh(&gact->tcf_lock);
	opt.action = gact->tcf_action;

	p = &gact->param;

	if (nla_put(skb, TCA_GATE_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_BASE_TIME,
			      p->tcfg_basetime, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME,
			      p->tcfg_cycletime, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME_EXT,
			      p->tcfg_cycletime_ext, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_CLOCKID, p->tcfg_clockid))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GATE_FLAGS, p->tcfg_flags))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_PRIORITY, p->tcfg_priority))
		goto nla_put_failure;

	entry_list = nla_nest_start_noflag(skb, TCA_GATE_ENTRY_LIST);
	if (!entry_list)
		goto nla_put_failure;

	list_for_each_entry(entry, &p->entries, list) {
		if (dumping_entry(skb, entry) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, entry_list);

	tcf_tm_dump(&t, &gact->tcf_tm);
	if (nla_put_64bit(skb, TCA_GATE_TM, sizeof(t), &t, TCA_GATE_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&gact->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&gact->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets,
				  u64 drops, u64 lastuse, bool hw)
{
	struct tcf_gate *gact = to_gate(a);
	struct tcf_t *tm = &gact->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static size_t tcf_gate_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_gate));
}

static void tcf_gate_entry_destructor(void *priv)
{
	struct action_gate_entry *oe = priv;

	kfree(oe);
}

static int tcf_gate_get_entries(struct flow_action_entry *entry,
				const struct tc_action *act)
{
	entry->gate.entries = tcf_gate_get_list(act);

	if (!entry->gate.entries)
		return -EINVAL;

	entry->destructor = tcf_gate_entry_destructor;
	entry->destructor_priv = entry->gate.entries;

	return 0;
}

static int tcf_gate_offload_act_setup(struct tc_action *act, void *entry_data,
				      u32 *index_inc, bool bind,
				      struct netlink_ext_ack *extack)
{
	int err;

	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_GATE;
		entry->gate.prio = tcf_gate_prio(act);
		entry->gate.basetime = tcf_gate_basetime(act);
		entry->gate.cycletime = tcf_gate_cycletime(act);
		entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
		entry->gate.num_entries = tcf_gate_num_entries(act);
		err = tcf_gate_get_entries(entry, act);
		if (err)
			return err;
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_GATE;
	}

	return 0;
}

static struct tc_action_ops act_gate_ops = {
	.kind		= "gate",
	.id		= TCA_ID_GATE,
	.owner		= THIS_MODULE,
	.act		= tcf_gate_act,
	.dump		= tcf_gate_dump,
	.init		= tcf_gate_init,
	.cleanup	= tcf_gate_cleanup,
	.stats_update	= tcf_gate_stats_update,
	.get_fill_size	= tcf_gate_get_fill_size,
	.offload_act_setup = tcf_gate_offload_act_setup,
	.size		= sizeof(struct tcf_gate),
};

static __net_init int gate_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id);

	return tc_action_net_init(net, tn, &act_gate_ops);
}

static void __net_exit gate_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_gate_ops.net_id);
}

static struct pernet_operations gate_net_ops = {
	.init = gate_init_net,
	.exit_batch = gate_exit_net,
	.id   = &act_gate_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init gate_init_module(void)
{
	return tcf_register_action(&act_gate_ops, &gate_net_ops);
}

static void __exit gate_cleanup_module(void)
{
	tcf_unregister_action(&act_gate_ops, &gate_net_ops);
}

module_init(gate_init_module);
module_exit(gate_cleanup_module);
MODULE_DESCRIPTION("TC gate action");
MODULE_LICENSE("GPL v2");
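
/* Usage sketch (illustrative only: the device name, match and timing
 * values below are placeholders, not mandated by this module). A gate
 * action is typically attached from userspace with tc, e.g. a two-entry
 * schedule on CLOCK_TAI:
 *
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 parent ffff: protocol ip \
 *		flower src_ip 192.168.0.20 \
 *		action gate index 2 clockid CLOCK_TAI \
 *		sched-entry open  200000000 -1 8000000 \
 *		sched-entry close 100000000 -1 -1
 *
 * Each sched-entry is <state> <interval ns> <ipv> <max-octets>. Since no
 * explicit cycle time is given, tcf_gate_init() derives it as the sum of
 * the intervals (300 ms): the gate passes matching packets for 200 ms
 * (at most 8000000 octets, IPV unset), then drops them for 100 ms, and
 * the cycle repeats.
 */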