// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	 Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 *
 */

#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/tcp.h>

static LIST_HEAD(taprio_list);
static DEFINE_SPINLOCK(taprio_list_lock);

#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX

struct sched_entry {
	struct list_head list;

	/* The instant that this entry "closes" and the next one
	 * should open, the qdisc will make some effort so that no
	 * packet leaves after this time.
	 */
	ktime_t close_time;
	ktime_t next_txtime;
	atomic_t budget;
	int index;
	u32 gate_mask;
	u32 interval;
	u8 command;
};

struct sched_gate_list {
	struct rcu_head rcu;
	struct list_head entries;
	size_t num_entries;
	ktime_t cycle_close_time;
	s64 cycle_time;
	s64 cycle_time_extension;
	s64 base_time;
};

struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	u32 flags;
	enum tk_offsets tk_offset;
	int clockid;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds it's sub-nanoseconds per byte
				    */

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct sched_gate_list __rcu *oper_sched;
	struct sched_gate_list __rcu *admin_sched;
	struct hrtimer advance_timer;
	struct list_head taprio_list;
	struct sk_buff *(*dequeue)(struct Qdisc *sch);
	struct sk_buff *(*peek)(struct Qdisc *sch);
	u32 txtime_delay;
};

struct __tc_taprio_qopt_offload {
	refcount_t users;
	struct tc_taprio_qopt_offload offload;
};

static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
	if (!sched)
		return KTIME_MAX;

	return ns_to_ktime(sched->base_time);
}

static ktime_t taprio_get_time(struct taprio_sched *q)
{
	ktime_t mono = ktime_get();

	switch (q->tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, q->tk_offset);
	}

	return KTIME_MAX;
}

static void taprio_free_sched_cb(struct rcu_head *head)
{
	struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
	struct sched_entry *entry, *n;

	if (!sched)
		return;

	list_for_each_entry_safe(entry, n, &sched->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	kfree(sched);
}

static void switch_schedules(struct taprio_sched *q,
			     struct sched_gate_list **admin,
			     struct sched_gate_list **oper)
{
	rcu_assign_pointer(q->oper_sched, *admin);
	rcu_assign_pointer(q->admin_sched, NULL);

	if (*oper)
		call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

	*oper = *admin;
	*admin = NULL;
}

/* Get how much time has already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
	ktime_t time_since_sched_start;
	s32 time_elapsed;

	time_since_sched_start = ktime_sub(time, sched->base_time);
	div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

	return time_elapsed;
}

static ktime_t get_interval_end_time(struct sched_gate_list *sched,
				     struct sched_gate_list *admin,
				     struct sched_entry *entry,
				     ktime_t intv_start)
{
	s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
	ktime_t intv_end, cycle_ext_end, cycle_end;

	cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
	intv_end = ktime_add_ns(intv_start, entry->interval);
	cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

	if (ktime_before(intv_end, cycle_end))
		return intv_end;
	else if (admin && admin != sched &&
		 ktime_after(admin->base_time, cycle_end) &&
		 ktime_before(admin->base_time, cycle_ext_end))
		return admin->base_time;
	else
		return cycle_end;
}

static int length_to_duration(struct taprio_sched *q, int len)
{
	return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
}

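/* Illustrative sketch, not part of the taprio qdisc itself: a worked example
 * of the picoseconds-per-byte bookkeeping used by length_to_duration() above.
 * Assuming a 1Gbps link, taprio_set_picos_per_byte() (later in this file)
 * stores (USEC_PER_SEC * 8) / 1000 = 8000 ps per byte, so a 1500-byte frame
 * maps to 1500 * 8000 / 1000 = 12000 ns of wire time.  The helper name and
 * the sample values are made up for the example.
 */
static inline u64 taprio_example_frame_duration_ns(void)
{
	u64 picos_per_byte = div_u64(USEC_PER_SEC * 8, SPEED_1000); /* 8000 ps */

	return div_u64(1500 * picos_per_byte, 1000); /* 12000 ns */
}
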
/* Returns the entry corresponding to the next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
						  struct Qdisc *sch,
						  struct sched_gate_list *sched,
						  struct sched_gate_list *admin,
						  ktime_t time,
						  ktime_t *interval_start,
						  ktime_t *interval_end,
						  bool validate_interval)
{
	ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
	ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
	struct sched_entry *entry = NULL, *entry_found = NULL;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	bool entry_available = false;
	s32 cycle_elapsed;
	int tc, n;

	tc = netdev_get_prio_tc_map(dev, skb->priority);
	packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

	*interval_start = 0;
	*interval_end = 0;

	if (!sched)
		return NULL;

	cycle = sched->cycle_time;
	cycle_elapsed = get_cycle_time_elapsed(sched, time);
	curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
	cycle_end = ktime_add_ns(curr_intv_end, cycle);

	list_for_each_entry(entry, &sched->entries, list) {
		curr_intv_start = curr_intv_end;
		curr_intv_end = get_interval_end_time(sched, admin, entry,
						      curr_intv_start);

		if (ktime_after(curr_intv_start, cycle_end))
			break;

		if (!(entry->gate_mask & BIT(tc)) ||
		    packet_transmit_time > entry->interval)
			continue;

		txtime = entry->next_txtime;

		if (ktime_before(txtime, time) || validate_interval) {
			transmit_end_time = ktime_add_ns(time, packet_transmit_time);
			if ((ktime_before(curr_intv_start, time) &&
			     ktime_before(transmit_end_time, curr_intv_end)) ||
			    (ktime_after(curr_intv_start, time) && !validate_interval)) {
				entry_found = entry;
				*interval_start = curr_intv_start;
				*interval_end = curr_intv_end;
				break;
			} else if (!entry_available && !validate_interval) {
				/* Here, we are just trying to find out the
				 * first available interval in the next cycle.
				 */
				entry_available = 1;
				entry_found = entry;
				*interval_start = ktime_add_ns(curr_intv_start, cycle);
				*interval_end = ktime_add_ns(curr_intv_end, cycle);
			}
		} else if (ktime_before(txtime, earliest_txtime) &&
			   !entry_available) {
			earliest_txtime = txtime;
			entry_found = entry;
			n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
			*interval_start = ktime_add(curr_intv_start, n * cycle);
			*interval_end = ktime_add(curr_intv_end, n * cycle);
		}
	}

	return entry_found;
}

static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t interval_start, interval_end;
	struct sched_entry *entry;

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
				       &interval_start, &interval_end, true);
	rcu_read_unlock();

	return entry;
}

static bool taprio_flags_valid(u32 flags)
{
	/* Make sure no other flag bits are set. */
	if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
		      TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	/* txtime-assist and full offload are mutually exclusive */
	if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
	    (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	return true;
}

/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
	unsigned int offset = skb_network_offset(skb);
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	struct ipv6hdr _ipv6h;

	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
	if (!ipv6h)
		return 0;

	if (ipv6h->version == 4) {
		iph = (struct iphdr *)ipv6h;
		offset += iph->ihl * 4;

		/* special-case 6in4 tunnelling, as that is a common way to get
		 * v6 connectivity in the home
		 */
		if (iph->protocol == IPPROTO_IPV6) {
			ipv6h = skb_header_pointer(skb, offset,
						   sizeof(_ipv6h), &_ipv6h);

			if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
				return 0;
		} else if (iph->protocol != IPPROTO_TCP) {
			return 0;
		}
	} else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
		return 0;
	}

	return ktime_mono_to_any(skb->skb_mstamp_ns, q->tk_offset);
}

/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and packet can be
 *       transmitted before it closes, schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open later
 *       in the cycle, set the txtime of packet to the interval start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
	ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t minimum_time, now, txtime;
	int len, packet_transmit_time;
	struct sched_entry *entry;
	bool sched_changed;

	now = taprio_get_time(q);
	minimum_time = ktime_add_ns(now, q->txtime_delay);

	tcp_tstamp = get_tcp_tstamp(q, skb);
	minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

	rcu_read_lock();
	admin = rcu_dereference(q->admin_sched);
	sched = rcu_dereference(q->oper_sched);
	if (admin && ktime_after(minimum_time, admin->base_time))
		switch_schedules(q, &admin, &sched);

	/* Until the schedule starts, all the queues are open */
	if (!sched || ktime_before(minimum_time, sched->base_time)) {
		txtime = minimum_time;
		goto done;
	}

	len = qdisc_pkt_len(skb);
	packet_transmit_time = length_to_duration(q, len);

	do {
		sched_changed = 0;

		entry = find_entry_to_transmit(skb, sch, sched, admin,
					       minimum_time,
					       &interval_start, &interval_end,
					       false);
		if (!entry) {
			txtime = 0;
			goto done;
		}

		txtime = entry->next_txtime;
		txtime = max_t(ktime_t, txtime, minimum_time);
		txtime = max_t(ktime_t, txtime, interval_start);

		if (admin && admin != sched &&
		    ktime_after(txtime, admin->base_time)) {
			sched = admin;
			sched_changed = 1;
			continue;
		}

		transmit_end_time = ktime_add(txtime, packet_transmit_time);
		minimum_time = transmit_end_time;

		/* Update the txtime of the current entry to the next time its
		 * interval starts.
		 */
		if (ktime_after(transmit_end_time, interval_end))
			entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
	} while (sched_changed || ktime_after(transmit_end_time, interval_end));

	entry->next_txtime = transmit_end_time;

done:
	rcu_read_unlock();
	return txtime;
}

static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct Qdisc *child;
	int queue;

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
		if (!is_valid_interval(skb, sch))
			return qdisc_drop(skb, sch, to_free);
	} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		skb->tstamp = get_packet_txtime(skb, sch);
		if (!skb->tstamp)
			return qdisc_drop(skb, sch, to_free);
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return qdisc_enqueue(skb, child, to_free);
}

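/* Illustrative sketch, not part of the taprio qdisc itself: the clamping that
 * get_packet_txtime() applies in txtime-assist mode.  The launch time stamped
 * into skb->tstamp is never earlier than "now + txtime-delay" (or the
 * timestamp TCP already requested), never earlier than what the entry already
 * handed out, and never earlier than the start of the interval in which the
 * packet's gate is open.  The helper name is made up for the example.
 */
static inline ktime_t taprio_example_clamp_txtime(ktime_t next_txtime,
						  ktime_t minimum_time,
						  ktime_t interval_start)
{
	ktime_t txtime = next_txtime;

	txtime = max_t(ktime_t, txtime, minimum_time);
	txtime = max_t(ktime_t, txtime, interval_start);

	return txtime;
}
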
static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_entry *entry;
	struct sk_buff *skb;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	rcu_read_unlock();

	if (!gate_mask)
		return NULL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		int prio;
		u8 tc;

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags))
			return skb;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;

		return skb;
	}

	return NULL;
}

static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		return skb;
	}

	return NULL;
}

static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->peek(sch);
}

static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
	atomic_set(&entry->budget,
		   div64_u64((u64)entry->interval * 1000,
			     atomic64_read(&q->picos_per_byte)));
}

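/* Illustrative sketch, not part of the taprio qdisc itself: what the value
 * computed by taprio_set_budget() means.  The entry interval (ns) is
 * converted to picoseconds and divided by the link's picoseconds per byte,
 * giving the number of bytes that fit in the interval.  For a hypothetical
 * 100us entry on a 1Gbps link: 100000 * 1000 / 8000 = 12500 bytes, i.e.
 * roughly eight full-sized frames before the gate closes.  The helper name
 * and values are made up for the example.
 */
static inline u64 taprio_example_budget_bytes(void)
{
	u64 interval_ns = 100 * NSEC_PER_USEC;	/* 100us gate interval */
	u64 picos_per_byte = 8000;		/* 1Gbps */

	return div64_u64(interval_ns * 1000, picos_per_byte); /* 12500 bytes */
}
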
static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb = NULL;
	struct sched_entry *entry;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	/* if there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open, this is in
	 * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 * "AdminGateStates"
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;

	if (!gate_mask)
		goto done;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		ktime_t guard;
		int prio;
		int len;
		u8 tc;

		if (unlikely(!child))
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			skb = child->ops->dequeue(child);
			if (!skb)
				continue;
			goto skb_found;
		}

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc))) {
			skb = NULL;
			continue;
		}

		len = qdisc_pkt_len(skb);
		guard = ktime_add_ns(taprio_get_time(q),
				     length_to_duration(q, len));

		/* In the case that there's no gate entry, there's no
		 * guard band ...
		 */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    ktime_after(guard, entry->close_time)) {
			skb = NULL;
			continue;
		}

		/* ... and no budget. */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    atomic_sub_return(len, &entry->budget) < 0) {
			skb = NULL;
			continue;
		}

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			goto done;

skb_found:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		goto done;
	}

done:
	rcu_read_unlock();

	return skb;
}

static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			continue;

		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		return skb;
	}

	return NULL;
}

static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->dequeue(sch);
}

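/* Illustrative sketch, not part of the taprio qdisc itself: the guard band
 * check performed by taprio_dequeue_soft() above.  A frame is only released
 * if it can finish transmitting before the current entry closes; with a
 * hypothetical 1500-byte frame on a 1Gbps link the guard is "now + 12us", so
 * with less than 12us left in the interval the frame stays queued until its
 * gate opens again.  The helper name and values are made up for the example.
 */
static inline bool taprio_example_fits_in_gate(ktime_t now, ktime_t close_time)
{
	u64 tx_duration_ns = div_u64(1500 * 8000ULL, 1000); /* 12us at 1Gbps */
	ktime_t guard = ktime_add_ns(now, tx_duration_ns);

	return !ktime_after(guard, close_time);
}
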
static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{
	if (list_is_last(&entry->list, &oper->entries))
		return true;

	if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
		return true;

	return false;
}

static bool should_change_schedules(const struct sched_gate_list *admin,
				    const struct sched_gate_list *oper,
				    ktime_t close_time)
{
	ktime_t next_base_time, extension_time;

	if (!admin)
		return false;

	next_base_time = sched_base_time(admin);

	/* This is the simple case, the close_time would fall after
	 * the next schedule base_time.
	 */
	if (ktime_compare(next_base_time, close_time) <= 0)
		return true;

	/* This is the cycle_time_extension case, if the close_time
	 * plus the amount that can be extended would fall after the
	 * next schedule base_time, we can extend the current schedule
	 * for that amount.
	 */
	extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);

	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
	 * how precisely the extension should be made. So after
	 * conformance testing, this logic may change.
	 */
	if (ktime_compare(next_base_time, extension_time) <= 0)
		return true;

	return false;
}

static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct sched_gate_list *oper, *admin;
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;
	ktime_t close_time;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));
	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	if (!oper)
		switch_schedules(q, &admin, &oper);

	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. The previous schedule just ended. The first
	 * entry of all schedules is pre-calculated during the
	 * schedule initialization.
	 */
	if (unlikely(!entry || entry->close_time == oper->base_time)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		close_time = next->close_time;
		goto first_run;
	}

	if (should_restart_cycle(oper, entry)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
						      oper->cycle_time);
	} else {
		next = list_next_entry(entry, list);
	}

	close_time = ktime_add_ns(entry->close_time, next->interval);
	close_time = min_t(ktime_t, close_time, oper->cycle_close_time);

	if (should_change_schedules(admin, oper, close_time)) {
		/* Set things so the next time this runs, the new
		 * schedule runs.
		 */
		close_time = sched_base_time(admin);
		switch_schedules(q, &admin, &oper);
	}

	next->close_time = close_time;
	taprio_set_budget(q, next);

first_run:
	rcu_assign_pointer(q->current_entry, next);
	spin_unlock(&q->current_entry_lock);

	hrtimer_set_expires(&q->advance_timer, close_time);

	rcu_read_lock();
	__netif_schedule(sch);
	rcu_read_unlock();

	return HRTIMER_RESTART;
}

static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY_INDEX]	   = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_CMD]	   = { .type = NLA_U8 },
	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
	[TCA_TAPRIO_ATTR_PRIOMAP] = {
		.len = sizeof(struct tc_mqprio_qopt)
	},
	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]           = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]            = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]         = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_FLAGS]                      = { .type = NLA_U32 },
	[TCA_TAPRIO_ATTR_TXTIME_DELAY]               = { .type = NLA_U32 },
};

static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
			    struct sched_entry *entry,
			    struct netlink_ext_ack *extack)
{
	int min_duration = length_to_duration(q, ETH_ZLEN);
	u32 interval = 0;

	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
		entry->command = nla_get_u8(
			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
		entry->gate_mask = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
		interval = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

	/* The interval should allow at least the minimum ethernet
	 * frame to go out.
	 */
	if (interval < min_duration) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	return 0;
}

static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
			     struct sched_entry *entry, int index,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
					  entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_sched_entry(q, tb, entry, extack);
}

static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
			    struct sched_gate_list *sched,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list)
		return -EINVAL;

	nla_for_each_nested(n, list, rem) {
		struct sched_entry *entry;

		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			return -ENOMEM;
		}

		err = parse_sched_entry(q, n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			return err;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;
}

static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
				 struct sched_gate_list *new,
				 struct netlink_ext_ack *extack)
{
	int err = 0;

	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
		NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
		return -ENOTSUPP;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
		err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
				       new, extack);
	if (err < 0)
		return err;

	if (!new->cycle_time) {
		struct sched_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &new->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);
		new->cycle_time = cycle;
	}

	return 0;
}

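/* Illustrative sketch, not part of the taprio qdisc itself: the two schedule
 * sanity rules applied by fill_sched_entry() and parse_taprio_schedule()
 * above.  Every entry interval must be long enough for a minimum-sized
 * Ethernet frame (ETH_ZLEN bytes, i.e. 60 * 8000 / 1000 = 480ns at 1Gbps),
 * and when no cycle-time is supplied it defaults to the sum of the entry
 * intervals.  The helper name and the sample intervals are made up.
 */
static inline bool taprio_example_schedule_is_sane(void)
{
	u64 min_interval_ns = div_u64(ETH_ZLEN * 8000ULL, 1000); /* 480ns */
	u32 intervals[] = { 300000, 300000, 400000 };	/* three entries, ns */
	u64 cycle_ns = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(intervals); i++) {
		if (intervals[i] < min_interval_ns)
			return false;
		cycle_ns += intervals[i];
	}

	return cycle_ns == 1000000;	/* defaulted cycle-time: 1ms */
}
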
static int taprio_parse_mqprio_opt(struct net_device *dev,
				   struct tc_mqprio_qopt *qopt,
				   struct netlink_ext_ack *extack,
				   u32 taprio_flags)
{
	int i, j;

	if (!qopt && !dev->num_tc) {
		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
		return -EINVAL;
	}

	/* If num_tc is already set, it means that the user already
	 * configured the mqprio part
	 */
	if (dev->num_tc)
		return 0;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
		return -EINVAL;
	}

	/* taprio imposes that traffic classes map 1:n to tx queues */
	if (qopt->num_tc > dev->num_tx_queues) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
		return -EINVAL;
	}

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i <= TC_BITMASK; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
			NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
			return -EINVAL;
		}
	}

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in tx range; being equal to the
		 * real_num_tx_queues indicates the last queue is in use.
		 */
		if (qopt->offset[i] >= dev->num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues) {
			NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
			return -EINVAL;
		}

		if (TXTIME_ASSIST_IS_ENABLED(taprio_flags))
			continue;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j]) {
				NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int taprio_get_start_time(struct Qdisc *sch,
				 struct sched_gate_list *sched,
				 ktime_t *start)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t now, base, cycle;
	s64 n;

	base = sched_base_time(sched);
	now = taprio_get_time(q);

	if (ktime_after(base, now)) {
		*start = base;
		return 0;
	}

	cycle = sched->cycle_time;

	/* The qdisc is expected to have at least one sched_entry. Moreover,
	 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
	 * something went really wrong. In that case, we should warn about this
	 * inconsistent state and return error.
	 */
	if (WARN_ON(!cycle))
		return -EFAULT;

	/* Schedule the start time for the beginning of the next
	 * cycle.
	 */
	n = div64_s64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
	return 0;
}

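/* Illustrative sketch, not part of the taprio qdisc itself: how
 * taprio_get_start_time() projects a base-time that already lies in the past
 * onto the start of the next full cycle.  With a hypothetical base of 0, a
 * 1ms cycle and "now" at 2.3ms, n = 2 and the schedule is started at 3ms.
 * The helper name is made up for the example.
 */
static inline ktime_t taprio_example_next_cycle_start(ktime_t base,
						      ktime_t now, s64 cycle)
{
	s64 n = div64_s64(ktime_sub_ns(now, base), cycle);

	return ktime_add_ns(base, (n + 1) * cycle);
}
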
static void setup_first_close_time(struct taprio_sched *q,
				   struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *first;
	ktime_t cycle;

	first = list_first_entry(&sched->entries,
				 struct sched_entry, list);

	cycle = sched->cycle_time;

	/* FIXME: find a better place to do this */
	sched->cycle_close_time = ktime_add_ns(base, cycle);

	first->close_time = ktime_add_ns(base, first->interval);
	taprio_set_budget(q, first);
	rcu_assign_pointer(q->current_entry, NULL);
}

static void taprio_start_sched(struct Qdisc *sch,
			       ktime_t start, struct sched_gate_list *new)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t expires;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		return;

	expires = hrtimer_get_expires(&q->advance_timer);
	if (expires == 0)
		expires = KTIME_MAX;

	/* If the new schedule starts before the next expiration, we
	 * reprogram it to the earliest one, so we change the admin
	 * schedule to the operational one at the right time.
	 */
	start = min_t(ktime_t, start, expires);

	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}

static void taprio_set_picos_per_byte(struct net_device *dev,
				      struct taprio_sched *q)
{
	struct ethtool_link_ksettings ecmd;
	int speed = SPEED_10;
	int picos_per_byte;
	int err;

	err = __ethtool_get_link_ksettings(dev, &ecmd);
	if (err < 0)
		goto skip;

	if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
		speed = ecmd.base.speed;

skip:
	picos_per_byte = (USEC_PER_SEC * 8) / speed;

	atomic64_set(&q->picos_per_byte, picos_per_byte);
	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
		   dev->name, (long long)atomic64_read(&q->picos_per_byte),
		   ecmd.base.speed);
}

static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net_device *qdev;
	struct taprio_sched *q;
	bool found = false;

	ASSERT_RTNL();

	if (event != NETDEV_UP && event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	spin_lock(&taprio_list_lock);
	list_for_each_entry(q, &taprio_list, taprio_list) {
		qdev = qdisc_dev(q->root);
		if (qdev == dev) {
			found = true;
			break;
		}
	}
	spin_unlock(&taprio_list_lock);

	if (found)
		taprio_set_picos_per_byte(dev, q);

	return NOTIFY_DONE;
}

static void setup_txtime(struct taprio_sched *q,
			 struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *entry;
	u32 interval = 0;

	list_for_each_entry(entry, &sched->entries, list) {
		entry->next_txtime = ktime_add_ns(base, interval);
		interval += entry->interval;
	}
}

static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = kzalloc(struct_size(__offload, offload.entries, num_entries),
			    GFP_KERNEL);
	if (!__offload)
		return NULL;

	refcount_set(&__offload->users, 1);

	return &__offload->offload;
}

struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	refcount_inc(&__offload->users);

	return offload;
}
EXPORT_SYMBOL_GPL(taprio_offload_get);

void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	if (!refcount_dec_and_test(&__offload->users))
		return;

	kfree(__offload);
}
EXPORT_SYMBOL_GPL(taprio_offload_free);

/* This function only serves to keep the pointers to the "oper" and "admin"
 * schedules valid in relation to their base times, so when calling dump()
 * the user looks at the right schedules.
 * When using full offload, the admin configuration is promoted to oper at the
 * base_time in the PHC time domain. But because the system time is not
 * necessarily in sync with that, we can't just trigger a hrtimer to call
 * switch_schedules at the right hardware time.
 * At the moment we call this by hand right away from taprio, but in the future
 * it will be useful to create a mechanism for drivers to notify taprio of the
 * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
 * This is left as TODO.
 */
static void taprio_offload_config_changed(struct taprio_sched *q)
{
	struct sched_gate_list *oper, *admin;

	spin_lock(&q->current_entry_lock);

	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	switch_schedules(q, &admin, &oper);

	spin_unlock(&q->current_entry_lock);
}

static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
{
	u32 i, queue_mask = 0;

	for (i = 0; i < dev->num_tc; i++) {
		u32 offset, count;

		if (!(tc_mask & BIT(i)))
			continue;

		offset = dev->tc_to_txq[i].offset;
		count = dev->tc_to_txq[i].count;

		queue_mask |= GENMASK(offset + count - 1, offset);
	}

	return queue_mask;
}

static void taprio_sched_to_offload(struct net_device *dev,
				    struct sched_gate_list *sched,
				    struct tc_taprio_qopt_offload *offload)
{
	struct sched_entry *entry;
	int i = 0;

	offload->base_time = sched->base_time;
	offload->cycle_time = sched->cycle_time;
	offload->cycle_time_extension = sched->cycle_time_extension;

	list_for_each_entry(entry, &sched->entries, list) {
		struct tc_taprio_sched_entry *e = &offload->entries[i];

		e->command = entry->command;
		e->interval = entry->interval;
		e->gate_mask = tc_map_to_queue_mask(dev, entry->gate_mask);

		i++;
	}

	offload->num_entries = i;
}

static int taprio_enable_offload(struct net_device *dev,
				 struct taprio_sched *q,
				 struct sched_gate_list *sched,
				 struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err = 0;

	if (!ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack,
			       "Device does not support taprio offload");
		return -EOPNOTSUPP;
	}

	offload = taprio_offload_alloc(sched->num_entries);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory for enabling offload mode");
		return -ENOMEM;
	}
	offload->enable = 1;
	taprio_sched_to_offload(dev, sched, offload);

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to setup taprio offload");
		goto done;
	}

done:
	taprio_offload_free(offload);

	return err;
}

static int taprio_disable_offload(struct net_device *dev,
				  struct taprio_sched *q,
				  struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
		return 0;

	if (!ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	offload = taprio_offload_alloc(0);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory to disable offload mode");
		return -ENOMEM;
	}
	offload->enable = 0;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to disable offload");
		goto out;
	}

out:
	taprio_offload_free(offload);

	return err;
}

/* If full offload is enabled, the only possible clockid is the net device's
 * PHC. For that reason, specifying a clockid through netlink is incorrect.
 * For txtime-assist, it is implicitly assumed that the device's PHC is kept
 * in sync with the specified clockid via a user space daemon such as phc2sys.
 * For both software taprio and txtime-assist, the clockid is used for the
 * hrtimer that advances the schedule and hence is mandatory.
 */
static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err = -EINVAL;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		const struct ethtool_ops *ops = dev->ethtool_ops;
		struct ethtool_ts_info info = {
			.cmd = ETHTOOL_GET_TS_INFO,
			.phc_index = -1,
		};

		if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
			NL_SET_ERR_MSG(extack,
				       "The 'clockid' cannot be specified for full offload");
			goto out;
		}

		if (ops && ops->get_ts_info)
			err = ops->get_ts_info(dev, &info);

		if (err || info.phc_index < 0) {
			NL_SET_ERR_MSG(extack,
				       "Device does not have a PTP clock");
			err = -ENOTSUPP;
			goto out;
		}
	} else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);

		/* We only support static clockids and we don't allow
		 * for it to be modified after the first init.
		 */
		if (clockid < 0 ||
		    (q->clockid != -1 && q->clockid != clockid)) {
			NL_SET_ERR_MSG(extack,
				       "Changing the 'clockid' of a running schedule is not supported");
			err = -ENOTSUPP;
			goto out;
		}

		switch (clockid) {
		case CLOCK_REALTIME:
			q->tk_offset = TK_OFFS_REAL;
			break;
		case CLOCK_MONOTONIC:
			q->tk_offset = TK_OFFS_MAX;
			break;
		case CLOCK_BOOTTIME:
			q->tk_offset = TK_OFFS_BOOT;
			break;
		case CLOCK_TAI:
			q->tk_offset = TK_OFFS_TAI;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
			err = -EINVAL;
			goto out;
		}

		q->clockid = clockid;
	} else {
		NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
		goto out;
	}

	/* Everything went ok, return success. */
	err = 0;

out:
	return err;
}

static int taprio_mqprio_cmp(const struct net_device *dev,
			     const struct tc_mqprio_qopt *mqprio)
{
	int i;

	if (!mqprio || mqprio->num_tc != dev->num_tc)
		return -1;

	for (i = 0; i < mqprio->num_tc; i++)
		if (dev->tc_to_txq[i].count != mqprio->count[i] ||
		    dev->tc_to_txq[i].offset != mqprio->offset[i])
			return -1;

	for (i = 0; i <= TC_BITMASK; i++)
		if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
			return -1;

	return 0;
}

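/* Illustrative sketch, not part of the taprio qdisc itself: how a gate mask
 * expressed in traffic classes is widened into a TX queue mask by
 * tc_map_to_queue_mask() above.  Assuming a hypothetical device with tc0
 * mapped to queues 0-1 and tc1 mapped to queues 2-3, a gate mask of BIT(1)
 * (only tc1 open) becomes the queue mask 0xc.  The helper and the mapping
 * are made up for the example.
 */
static inline u32 taprio_example_queue_mask(void)
{
	u32 tc_offset[] = { 0, 2 };	/* as in dev->tc_to_txq[i].offset */
	u32 tc_count[] = { 2, 2 };	/* as in dev->tc_to_txq[i].count */
	u32 tc_mask = BIT(1), queue_mask = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(tc_offset); i++) {
		if (!(tc_mask & BIT(i)))
			continue;
		queue_mask |= GENMASK(tc_offset[i] + tc_count[i] - 1,
				      tc_offset[i]);
	}

	return queue_mask;	/* 0xc: queues 2 and 3 */
}
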
/* The semantics of the 'flags' argument in relation to 'change()'
 * requests, are interpreted following two rules (which are applied in
 * this order): (1) an omitted 'flags' argument is interpreted as
 * zero; (2) the 'flags' of a "running" taprio instance cannot be
 * changed.
 */
static int taprio_new_flags(const struct nlattr *attr, u32 old,
			    struct netlink_ext_ack *extack)
{
	u32 new = 0;

	if (attr)
		new = nla_get_u32(attr);

	if (old != TAPRIO_FLAGS_INVALID && old != new) {
		NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
		return -EOPNOTSUPP;
	}

	if (!taprio_flags_valid(new)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
		return -EINVAL;
	}

	return new;
}

static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
	struct sched_gate_list *oper, *admin, *new_admin;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mqprio_qopt *mqprio = NULL;
	unsigned long flags;
	ktime_t start;
	int i, err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
					  taprio_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
		mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);

	err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
			       q->flags, extack);
	if (err < 0)
		return err;

	q->flags = err;

	err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
	if (err < 0)
		return err;

	new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
	if (!new_admin) {
		NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&new_admin->entries);

	rcu_read_lock();
	oper = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);
	rcu_read_unlock();

	/* no changes - no new mqprio settings */
	if (!taprio_mqprio_cmp(dev, mqprio))
		mqprio = NULL;

	if (mqprio && (oper || admin)) {
		NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
		err = -ENOTSUPP;
		goto free_sched;
	}

	err = parse_taprio_schedule(q, tb, new_admin, extack);
	if (err < 0)
		goto free_sched;

	if (new_admin->num_entries == 0) {
		NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
		err = -EINVAL;
		goto free_sched;
	}

	err = taprio_parse_clockid(sch, tb, extack);
	if (err < 0)
		goto free_sched;

	taprio_set_picos_per_byte(dev, q);

	if (mqprio) {
		netdev_set_num_tc(dev, mqprio->num_tc);
		for (i = 0; i < mqprio->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    mqprio->count[i],
					    mqprio->offset[i]);

		/* Always use supplied priority mappings */
		for (i = 0; i <= TC_BITMASK; i++)
			netdev_set_prio_tc_map(dev, i,
					       mqprio->prio_tc_map[i]);
	}

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		err = taprio_enable_offload(dev, q, new_admin, extack);
	else
		err = taprio_disable_offload(dev, q, extack);
	if (err)
		goto free_sched;

	/* Protects against enqueue()/dequeue() */
	spin_lock_bh(qdisc_lock(sch));

	if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
		if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
			err = -EINVAL;
			goto unlock;
		}

		q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
	}

	if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
	    !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    !hrtimer_active(&q->advance_timer)) {
		hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
		q->advance_timer.function = advance_sched;
	}

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		q->dequeue = taprio_dequeue_offload;
		q->peek = taprio_peek_offload;
	} else {
		/* Be sure to always keep the function pointers
		 * in a consistent state.
		 */
		q->dequeue = taprio_dequeue_soft;
		q->peek = taprio_peek_soft;
	}

	err = taprio_get_start_time(sch, new_admin, &start);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
		goto unlock;
	}

	setup_txtime(q, new_admin, start);

	if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		if (!oper) {
			rcu_assign_pointer(q->oper_sched, new_admin);
			err = 0;
			new_admin = NULL;
			goto unlock;
		}

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);
	} else {
		setup_first_close_time(q, new_admin, start);

		/* Protects against advance_sched() */
		spin_lock_irqsave(&q->current_entry_lock, flags);

		taprio_start_sched(sch, start, new_admin);

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);

		spin_unlock_irqrestore(&q->current_entry_lock, flags);

		if (FULL_OFFLOAD_IS_ENABLED(q->flags))
			taprio_offload_config_changed(q);
	}

	new_admin = NULL;
	err = 0;

unlock:
	spin_unlock_bh(qdisc_lock(sch));

free_sched:
	if (new_admin)
		call_rcu(&new_admin->rcu, taprio_free_sched_cb);

	return err;
}

static void taprio_reset(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i;

	hrtimer_cancel(&q->advance_timer);
	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
			qdisc_reset(q->qdiscs[i]);
	}
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void taprio_destroy(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	unsigned int i;

	spin_lock(&taprio_list_lock);
	list_del(&q->taprio_list);
	spin_unlock(&taprio_list_lock);

	taprio_disable_offload(dev, q, NULL);

	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
			qdisc_put(q->qdiscs[i]);

		kfree(q->qdiscs);
	}
	q->qdiscs = NULL;

	netdev_reset_tc(dev);

	if (q->oper_sched)
		call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);

	if (q->admin_sched)
		call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb);
}

static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i;

	spin_lock_init(&q->current_entry_lock);

	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
	q->advance_timer.function = advance_sched;

	q->dequeue = taprio_dequeue_soft;
	q->peek = taprio_peek_soft;

	q->root = sch;

	/* We only support static clockids. Use an invalid value as default
	 * and get the valid one on taprio_change().
	 */
	q->clockid = -1;
	q->flags = TAPRIO_FLAGS_INVALID;

	spin_lock(&taprio_list_lock);
	list_add(&q->taprio_list, &taprio_list);
	spin_unlock(&taprio_list_lock);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdisc, attachment can't fail */
	q->qdiscs = kcalloc(dev->num_tx_queues,
			    sizeof(q->qdiscs[0]),
			    GFP_KERNEL);

	if (!q->qdiscs)
		return -ENOMEM;

	if (!opt)
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		struct Qdisc *qdisc;

		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;

		if (i < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);

		q->qdiscs[i] = qdisc;
	}

	return taprio_change(sch, opt, extack);
}

static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return netdev_get_tx_queue(dev, ntx);
}

static int taprio_graft(struct Qdisc *sch, unsigned long cl,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = q->qdiscs[cl - 1];
	q->qdiscs[cl - 1] = new;

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

static int dump_entry(struct sk_buff *msg,
		      const struct sched_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
			entry->gate_mask))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
			entry->interval))
		goto nla_put_failure;

	return nla_nest_end(msg, item);

nla_put_failure:
	nla_nest_cancel(msg, item);
	return -1;
}

static int dump_schedule(struct sk_buff *msg,
			 const struct sched_gate_list *root)
{
	struct nlattr *entry_list;
	struct sched_entry *entry;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
			root->base_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
			root->cycle_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
			root->cycle_time_extension, TCA_TAPRIO_PAD))
		return -1;

	entry_list = nla_nest_start_noflag(msg,
					   TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
	if (!entry_list)
		goto error_nest;

	list_for_each_entry(entry, &root->entries, list) {
		if (dump_entry(msg, entry) < 0)
			goto error_nest;
	}

	nla_nest_end(msg, entry_list);
	return 0;

error_nest:
	nla_nest_cancel(msg, entry_list);
	return -1;
}

static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *oper, *admin;
	struct tc_mqprio_qopt opt = { 0 };
	struct nlattr *nest, *sched_nest;
	unsigned int i;

	rcu_read_lock();
	oper = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto start_error;

	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
		goto options_error;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
		goto options_error;

	if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
		goto options_error;

	if (q->txtime_delay &&
	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
		goto options_error;

	if (oper && dump_schedule(skb, oper))
		goto options_error;

	if (!admin)
		goto done;

	sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
	if (!sched_nest)
		goto options_error;

	if (dump_schedule(skb, admin))
		goto admin_error;

	nla_nest_end(skb, sched_nest);

done:
	rcu_read_unlock();

	return nla_nest_end(skb, nest);

admin_error:
	nla_nest_cancel(skb, sched_nest);

options_error:
	nla_nest_cancel(skb, nest);

start_error:
	rcu_read_unlock();
	return -ENOSPC;
}

static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!taprio_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;

	return 0;
}

static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    qdisc_qstats_copy(d, sch) < 0)
		return -1;
	return 0;
}

static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops taprio_class_ops = {
	.graft		= taprio_graft,
	.leaf		= taprio_leaf,
	.find		= taprio_find,
	.walk		= taprio_walk,
	.dump		= taprio_dump_class,
	.dump_stats	= taprio_dump_class_stats,
	.select_queue	= taprio_select_queue,
};

static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
	.cl_ops		= &taprio_class_ops,
	.id		= "taprio",
	.priv_size	= sizeof(struct taprio_sched),
	.init		= taprio_init,
	.change		= taprio_change,
	.destroy	= taprio_destroy,
	.reset		= taprio_reset,
	.peek		= taprio_peek,
	.dequeue	= taprio_dequeue,
	.enqueue	= taprio_enqueue,
	.dump		= taprio_dump,
	.owner		= THIS_MODULE,
};

static struct notifier_block taprio_device_notifier = {
	.notifier_call = taprio_dev_notifier,
};

static int __init taprio_module_init(void)
{
	int err = register_netdevice_notifier(&taprio_device_notifier);

	if (err)
		return err;

	return register_qdisc(&taprio_qdisc_ops);
}

static void __exit taprio_module_exit(void)
{
	unregister_qdisc(&taprio_qdisc_ops);
	unregister_netdevice_notifier(&taprio_device_notifier);
}

module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");