// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 *
 */

#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/time.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/tcp.h>

static LIST_HEAD(taprio_list);

#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX

struct sched_entry {
	struct list_head list;

	/* The instant that this entry "closes" and the next one
	 * should open, the qdisc will make some effort so that no
	 * packet leaves after this time.
	 */
	ktime_t close_time;
	ktime_t next_txtime;
	atomic_t budget;
	int index;
	u32 gate_mask;
	u32 interval;
	u8 command;
};

struct sched_gate_list {
	struct rcu_head rcu;
	struct list_head entries;
	size_t num_entries;
	ktime_t cycle_close_time;
	s64 cycle_time;
	s64 cycle_time_extension;
	s64 base_time;
};

struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	u32 flags;
	enum tk_offsets tk_offset;
	int clockid;
	bool offloaded;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds it's sub-nanoseconds per byte
				    */

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct sched_gate_list __rcu *oper_sched;
	struct sched_gate_list __rcu *admin_sched;
	struct hrtimer advance_timer;
	struct list_head taprio_list;
	u32 txtime_delay;
};

struct __tc_taprio_qopt_offload {
	refcount_t users;
	struct tc_taprio_qopt_offload offload;
};

static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
	if (!sched)
		return KTIME_MAX;

	return ns_to_ktime(sched->base_time);
}

static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)
{
	/* This pairs with WRITE_ONCE() in taprio_parse_clockid() */
	enum tk_offsets tk_offset = READ_ONCE(q->tk_offset);

	switch (tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, tk_offset);
	}
}

static ktime_t taprio_get_time(const struct taprio_sched *q)
{
	return taprio_mono_to_any(q, ktime_get());
}

static void taprio_free_sched_cb(struct rcu_head *head)
{
	struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
	struct sched_entry *entry, *n;

	list_for_each_entry_safe(entry, n, &sched->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	kfree(sched);
}

static void switch_schedules(struct taprio_sched *q,
			     struct sched_gate_list **admin,
			     struct sched_gate_list **oper)
{
	rcu_assign_pointer(q->oper_sched, *admin);
	rcu_assign_pointer(q->admin_sched, NULL);

	if (*oper)
		call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

	*oper = *admin;
	*admin = NULL;
}

/* Get how much time has already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
	ktime_t time_since_sched_start;
	s32 time_elapsed;

	time_since_sched_start = ktime_sub(time, sched->base_time);
	div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

	return time_elapsed;
}

static ktime_t get_interval_end_time(struct sched_gate_list *sched,
				     struct sched_gate_list *admin,
				     struct sched_entry *entry,
				     ktime_t intv_start)
{
	s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
	ktime_t intv_end, cycle_ext_end, cycle_end;

	cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
	intv_end = ktime_add_ns(intv_start, entry->interval);
	cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

	if (ktime_before(intv_end, cycle_end))
		return intv_end;
	else if (admin && admin != sched &&
		 ktime_after(admin->base_time, cycle_end) &&
		 ktime_before(admin->base_time, cycle_ext_end))
		return admin->base_time;
	else
		return cycle_end;
}

static int length_to_duration(struct taprio_sched *q, int len)
{
	return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC);
}

/* Returns the entry corresponding to the next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
						  struct Qdisc *sch,
						  struct sched_gate_list *sched,
						  struct sched_gate_list *admin,
						  ktime_t time,
						  ktime_t *interval_start,
						  ktime_t *interval_end,
						  bool validate_interval)
{
	ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
	ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
	struct sched_entry *entry = NULL, *entry_found = NULL;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	bool entry_available = false;
	s32 cycle_elapsed;
	int tc, n;

	tc = netdev_get_prio_tc_map(dev, skb->priority);
	packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

	*interval_start = 0;
	*interval_end = 0;

	if (!sched)
		return NULL;

	cycle = sched->cycle_time;
	cycle_elapsed = get_cycle_time_elapsed(sched, time);
	curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
	cycle_end = ktime_add_ns(curr_intv_end, cycle);

	list_for_each_entry(entry, &sched->entries, list) {
		curr_intv_start = curr_intv_end;
		curr_intv_end = get_interval_end_time(sched, admin, entry,
						      curr_intv_start);

		if (ktime_after(curr_intv_start, cycle_end))
			break;

		if (!(entry->gate_mask & BIT(tc)) ||
		    packet_transmit_time > entry->interval)
			continue;

		txtime = entry->next_txtime;

		if (ktime_before(txtime, time) || validate_interval) {
			transmit_end_time = ktime_add_ns(time, packet_transmit_time);
			if ((ktime_before(curr_intv_start, time) &&
			     ktime_before(transmit_end_time, curr_intv_end)) ||
			    (ktime_after(curr_intv_start, time) && !validate_interval)) {
				entry_found = entry;
				*interval_start = curr_intv_start;
				*interval_end = curr_intv_end;
				break;
			} else if (!entry_available && !validate_interval) {
				/* Here, we are just trying to find out the
				 * first available interval in the next cycle.
				 */
				entry_available = true;
				entry_found = entry;
				*interval_start = ktime_add_ns(curr_intv_start, cycle);
				*interval_end = ktime_add_ns(curr_intv_end, cycle);
			}
		} else if (ktime_before(txtime, earliest_txtime) &&
			   !entry_available) {
			earliest_txtime = txtime;
			entry_found = entry;
			n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
			*interval_start = ktime_add(curr_intv_start, n * cycle);
			*interval_end = ktime_add(curr_intv_end, n * cycle);
		}
	}

	return entry_found;
}

static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t interval_start, interval_end;
	struct sched_entry *entry;

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
				       &interval_start, &interval_end, true);
	rcu_read_unlock();

	return entry;
}

static bool taprio_flags_valid(u32 flags)
{
	/* Make sure no other flag bits are set. */
	if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
		      TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	/* txtime-assist and full offload are mutually exclusive */
	if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
	    (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	return true;
}

/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
	unsigned int offset = skb_network_offset(skb);
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	struct ipv6hdr _ipv6h;

	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
	if (!ipv6h)
		return 0;

	if (ipv6h->version == 4) {
		iph = (struct iphdr *)ipv6h;
		offset += iph->ihl * 4;

		/* special-case 6in4 tunnelling, as that is a common way to get
		 * v6 connectivity in the home
		 */
		if (iph->protocol == IPPROTO_IPV6) {
			ipv6h = skb_header_pointer(skb, offset,
						   sizeof(_ipv6h), &_ipv6h);

			if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
				return 0;
		} else if (iph->protocol != IPPROTO_TCP) {
			return 0;
		}
	} else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
		return 0;
	}

	return taprio_mono_to_any(q, skb->skb_mstamp_ns);
}

/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and packet can be
 *       transmitted before it closes, schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open later
 *       in the cycle, set the txtime of packet to the interval start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
	ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t minimum_time, now, txtime;
	int len, packet_transmit_time;
	struct sched_entry *entry;
	bool sched_changed;

	now = taprio_get_time(q);
	minimum_time = ktime_add_ns(now, q->txtime_delay);

	tcp_tstamp = get_tcp_tstamp(q, skb);
	minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

	rcu_read_lock();
	admin = rcu_dereference(q->admin_sched);
	sched = rcu_dereference(q->oper_sched);
	if (admin && ktime_after(minimum_time, admin->base_time))
		switch_schedules(q, &admin, &sched);

	/* Until the schedule starts, all the queues are open */
	if (!sched || ktime_before(minimum_time, sched->base_time)) {
		txtime = minimum_time;
		goto done;
	}

	len = qdisc_pkt_len(skb);
	packet_transmit_time = length_to_duration(q, len);

	do {
		sched_changed = false;

		entry = find_entry_to_transmit(skb, sch, sched, admin,
					       minimum_time,
					       &interval_start, &interval_end,
					       false);
		if (!entry) {
			txtime = 0;
			goto done;
		}

		txtime = entry->next_txtime;
		txtime = max_t(ktime_t, txtime, minimum_time);
		txtime = max_t(ktime_t, txtime, interval_start);

		if (admin && admin != sched &&
		    ktime_after(txtime, admin->base_time)) {
			sched = admin;
			sched_changed = true;
			continue;
		}

		transmit_end_time = ktime_add(txtime, packet_transmit_time);
		minimum_time = transmit_end_time;

		/* Update the txtime of current entry to the next time its
		 * interval starts.
		 */
		if (ktime_after(transmit_end_time, interval_end))
			entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
	} while (sched_changed || ktime_after(transmit_end_time, interval_end));

	entry->next_txtime = transmit_end_time;

done:
	rcu_read_unlock();
	return txtime;
}

static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
			      struct Qdisc *child, struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);

	/* sk_flags are only safe to use on full sockets. */
	if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
		if (!is_valid_interval(skb, sch))
			return qdisc_drop(skb, sch, to_free);
	} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		skb->tstamp = get_packet_txtime(skb, sch);
		if (!skb->tstamp)
			return qdisc_drop(skb, sch, to_free);
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return qdisc_enqueue(skb, child, to_free);
}

/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct Qdisc *child;
	int queue;

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	/* Large packets might not be transmitted when the transmission duration
	 * exceeds any configured interval. Therefore, segment the skb into
	 * smaller chunks. Drivers with full offload are expected to handle
	 * this in hardware.
	 */
	if (skb_is_gso(skb)) {
		unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
		netdev_features_t features = netif_skb_features(skb);
		struct sk_buff *segs, *nskb;
		int ret;

		segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
		if (IS_ERR_OR_NULL(segs))
			return qdisc_drop(skb, sch, to_free);

		skb_list_walk_safe(segs, segs, nskb) {
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			slen += segs->len;

			ret = taprio_enqueue_one(segs, sch, child, to_free);
			if (ret != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(ret))
					qdisc_qstats_drop(sch);
			} else {
				numsegs++;
			}
		}

		if (numsegs > 1)
			qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
		consume_skb(skb);

		return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
	}

	return taprio_enqueue_one(skb, sch, child, to_free);
}

/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_entry *entry;
	struct sk_buff *skb;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	rcu_read_unlock();

	if (!gate_mask)
		return NULL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		int prio;
		u8 tc;

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags))
			return skb;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;

		return skb;
	}

	return NULL;
}

static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
	atomic_set(&entry->budget,
		   div64_u64((u64)entry->interval * PSEC_PER_NSEC,
			     atomic64_read(&q->picos_per_byte)));
}

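/* Worked example (illustrative, not part of the original source): with a
 * 300us (300000ns) gate interval on a 1Gb/s link, picos_per_byte is 8000,
 * so the budget above is 300000 * 1000 / 8000 = 37500 bytes, i.e. roughly
 * 25 full-sized ethernet frames may be dequeued for that entry per cycle.
 */
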
/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb = NULL;
	struct sched_entry *entry;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	/* if there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open, this is in
	 * accordance to IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 * "AdminGateStates"
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;

	if (!gate_mask)
		goto done;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		ktime_t guard;
		int prio;
		int len;
		u8 tc;

		if (unlikely(!child))
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			skb = child->ops->dequeue(child);
			if (!skb)
				continue;
			goto skb_found;
		}

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc))) {
			skb = NULL;
			continue;
		}

		len = qdisc_pkt_len(skb);
		guard = ktime_add_ns(taprio_get_time(q),
				     length_to_duration(q, len));

		/* In the case that there's no gate entry, there's no
		 * guard band ...
		 */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    ktime_after(guard, entry->close_time)) {
			skb = NULL;
			continue;
		}

		/* ... and no budget. */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    atomic_sub_return(len, &entry->budget) < 0) {
			skb = NULL;
			continue;
		}

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			goto done;

skb_found:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		goto done;
	}

done:
	rcu_read_unlock();

	return skb;
}

static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{
	if (list_is_last(&entry->list, &oper->entries))
		return true;

	if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
		return true;

	return false;
}

static bool should_change_schedules(const struct sched_gate_list *admin,
				    const struct sched_gate_list *oper,
				    ktime_t close_time)
{
	ktime_t next_base_time, extension_time;

	if (!admin)
		return false;

	next_base_time = sched_base_time(admin);

	/* This is the simple case, the close_time would fall after
	 * the next schedule base_time.
	 */
	if (ktime_compare(next_base_time, close_time) <= 0)
		return true;

	/* This is the cycle_time_extension case, if the close_time
	 * plus the amount that can be extended would fall after the
	 * next schedule base_time, we can extend the current schedule
	 * for that amount.
	 */
	extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);

	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
	 * how precisely the extension should be made. So after
	 * conformance testing, this logic may change.
	 */
	if (ktime_compare(next_base_time, extension_time) <= 0)
		return true;

	return false;
}

static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct sched_gate_list *oper, *admin;
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;
	ktime_t close_time;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));
	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	if (!oper)
		switch_schedules(q, &admin, &oper);

	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. The previous schedule just ended. The first
	 * entry of all schedules are pre-calculated during the
	 * schedule initialization.
	 */
	if (unlikely(!entry || entry->close_time == oper->base_time)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		close_time = next->close_time;
		goto first_run;
	}

	if (should_restart_cycle(oper, entry)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
						      oper->cycle_time);
	} else {
		next = list_next_entry(entry, list);
	}

	close_time = ktime_add_ns(entry->close_time, next->interval);
	close_time = min_t(ktime_t, close_time, oper->cycle_close_time);

	if (should_change_schedules(admin, oper, close_time)) {
		/* Set things so the next time this runs, the new
		 * schedule runs.
		 */
		close_time = sched_base_time(admin);
		switch_schedules(q, &admin, &oper);
	}

	next->close_time = close_time;
	taprio_set_budget(q, next);

first_run:
	rcu_assign_pointer(q->current_entry, next);
	spin_unlock(&q->current_entry_lock);

	hrtimer_set_expires(&q->advance_timer, close_time);

	rcu_read_lock();
	__netif_schedule(sch);
	rcu_read_unlock();

	return HRTIMER_RESTART;
}

static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY_INDEX]     = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_CMD]       = { .type = NLA_U8 },
	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
	[TCA_TAPRIO_ATTR_PRIOMAP] = {
		.len = sizeof(struct tc_mqprio_qopt)
	},
	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]           = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]            = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]         = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_FLAGS]                      = { .type = NLA_U32 },
	[TCA_TAPRIO_ATTR_TXTIME_DELAY]               = { .type = NLA_U32 },
};

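/* Illustrative example (not part of the original source) of how the
 * attributes above are typically filled in from user space with the
 * iproute2 'tc' tool (device name and values are only placeholders):
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *         num_tc 3 \
 *         map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *         queues 1@0 1@1 2@2 \
 *         base-time 1528743495910289987 \
 *         sched-entry S 01 300000 \
 *         sched-entry S 02 300000 \
 *         sched-entry S 04 400000 \
 *         clockid CLOCK_TAI
 *
 * Each 'sched-entry S <gatemask> <interval>' line becomes one nested
 * TCA_TAPRIO_SCHED_ENTRY whose command, gate_mask and interval are parsed
 * by fill_sched_entry() below.
 */
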
static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
			    struct sched_entry *entry,
			    struct netlink_ext_ack *extack)
{
	int min_duration = length_to_duration(q, ETH_ZLEN);
	u32 interval = 0;

	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
		entry->command = nla_get_u8(
			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
		entry->gate_mask = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
		interval = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

	/* The interval should allow at least the minimum ethernet
	 * frame to go out.
	 */
	if (interval < min_duration) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	return 0;
}

static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
			     struct sched_entry *entry, int index,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
					  entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_sched_entry(q, tb, entry, extack);
}

static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
			    struct sched_gate_list *sched,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list)
		return -EINVAL;

	nla_for_each_nested(n, list, rem) {
		struct sched_entry *entry;

		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			return -ENOMEM;
		}

		err = parse_sched_entry(q, n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			return err;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;
}

static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
				 struct sched_gate_list *new,
				 struct netlink_ext_ack *extack)
{
	int err = 0;

	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
		NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
		return -ENOTSUPP;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
		err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
				       new, extack);
	if (err < 0)
		return err;

	if (!new->cycle_time) {
		struct sched_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &new->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);

		if (!cycle) {
			NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
			return -EINVAL;
		}

		new->cycle_time = cycle;
	}

	return 0;
}

static int taprio_parse_mqprio_opt(struct net_device *dev,
				   struct tc_mqprio_qopt *qopt,
				   struct netlink_ext_ack *extack,
				   u32 taprio_flags)
{
	int i, j;

	if (!qopt && !dev->num_tc) {
		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
		return -EINVAL;
	}

	/* If num_tc is already set, it means that the user already
	 * configured the mqprio part
	 */
	if (dev->num_tc)
		return 0;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
		return -EINVAL;
	}

	/* taprio imposes that traffic classes map 1:n to tx queues */
	if (qopt->num_tc > dev->num_tx_queues) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
		return -EINVAL;
	}

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i <= TC_BITMASK; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
			NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
			return -EINVAL;
		}
	}

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is within the tx range; a count equal
		 * to real_num_tx_queues indicates the last queue is in use.
		 */
		if (qopt->offset[i] >= dev->num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues) {
			NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
			return -EINVAL;
		}

		if (TXTIME_ASSIST_IS_ENABLED(taprio_flags))
			continue;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j]) {
				NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int taprio_get_start_time(struct Qdisc *sch,
				 struct sched_gate_list *sched,
				 ktime_t *start)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t now, base, cycle;
	s64 n;

	base = sched_base_time(sched);
	now = taprio_get_time(q);

	if (ktime_after(base, now)) {
		*start = base;
		return 0;
	}

	cycle = sched->cycle_time;

	/* The qdisc is expected to have at least one sched_entry. Moreover,
	 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
	 * something went really wrong. In that case, we should warn about this
	 * inconsistent state and return an error.
	 */
	if (WARN_ON(!cycle))
		return -EFAULT;

	/* Schedule the start time for the beginning of the next
	 * cycle.
	 */
	n = div64_s64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
	return 0;
}

static void setup_first_close_time(struct taprio_sched *q,
				   struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *first;
	ktime_t cycle;

	first = list_first_entry(&sched->entries,
				 struct sched_entry, list);

	cycle = sched->cycle_time;

	/* FIXME: find a better place to do this */
	sched->cycle_close_time = ktime_add_ns(base, cycle);

	first->close_time = ktime_add_ns(base, first->interval);
	taprio_set_budget(q, first);
	rcu_assign_pointer(q->current_entry, NULL);
}

static void taprio_start_sched(struct Qdisc *sch,
			       ktime_t start, struct sched_gate_list *new)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t expires;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		return;

	expires = hrtimer_get_expires(&q->advance_timer);
	if (expires == 0)
		expires = KTIME_MAX;

	/* If the new schedule starts before the next expiration, we
	 * reprogram it to the earliest one, so we change the admin
	 * schedule to the operational one at the right time.
	 */
	start = min_t(ktime_t, start, expires);

	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}

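/* Worked example (illustrative, not part of the original source): for a
 * 1Gb/s link, ecmd.base.speed is 1000, so picos_per_byte below works out to
 * (1000000 * 8) / 1000 = 8000 picoseconds (8ns) per byte; a 1500 byte frame
 * then occupies the link for roughly 12us, which is the duration used by
 * length_to_duration() and taprio_set_budget().
 */
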
static void taprio_set_picos_per_byte(struct net_device *dev,
				      struct taprio_sched *q)
{
	struct ethtool_link_ksettings ecmd;
	int speed = SPEED_10;
	int picos_per_byte;
	int err;

	err = __ethtool_get_link_ksettings(dev, &ecmd);
	if (err < 0)
		goto skip;

	if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
		speed = ecmd.base.speed;

skip:
	picos_per_byte = (USEC_PER_SEC * 8) / speed;

	atomic64_set(&q->picos_per_byte, picos_per_byte);
	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
		   dev->name, (long long)atomic64_read(&q->picos_per_byte),
		   ecmd.base.speed);
}

static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net_device *qdev;
	struct taprio_sched *q;
	bool found = false;

	ASSERT_RTNL();

	if (event != NETDEV_UP && event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	list_for_each_entry(q, &taprio_list, taprio_list) {
		qdev = qdisc_dev(q->root);
		if (qdev == dev) {
			found = true;
			break;
		}
	}

	if (found)
		taprio_set_picos_per_byte(dev, q);

	return NOTIFY_DONE;
}

static void setup_txtime(struct taprio_sched *q,
			 struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *entry;
	u32 interval = 0;

	list_for_each_entry(entry, &sched->entries, list) {
		entry->next_txtime = ktime_add_ns(base, interval);
		interval += entry->interval;
	}
}

static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = kzalloc(struct_size(__offload, offload.entries, num_entries),
			    GFP_KERNEL);
	if (!__offload)
		return NULL;

	refcount_set(&__offload->users, 1);

	return &__offload->offload;
}

struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	refcount_inc(&__offload->users);

	return offload;
}
EXPORT_SYMBOL_GPL(taprio_offload_get);

void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	if (!refcount_dec_and_test(&__offload->users))
		return;

	kfree(__offload);
}
EXPORT_SYMBOL_GPL(taprio_offload_free);

/* The function will only serve to keep the pointers to the "oper" and "admin"
 * schedules valid in relation to their base times, so when calling dump() the
 * user looks at the right schedules.
 * When using full offload, the admin configuration is promoted to oper at the
 * base_time in the PHC time domain. But because the system time is not
 * necessarily in sync with that, we can't just trigger a hrtimer to call
 * switch_schedules at the right hardware time.
 * At the moment we call this by hand right away from taprio, but in the future
 * it will be useful to create a mechanism for drivers to notify taprio of the
 * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
 * This is left as TODO.
 */
static void taprio_offload_config_changed(struct taprio_sched *q)
{
	struct sched_gate_list *oper, *admin;

	oper = rtnl_dereference(q->oper_sched);
	admin = rtnl_dereference(q->admin_sched);

	switch_schedules(q, &admin, &oper);
}

static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
{
	u32 i, queue_mask = 0;

	for (i = 0; i < dev->num_tc; i++) {
		u32 offset, count;

		if (!(tc_mask & BIT(i)))
			continue;

		offset = dev->tc_to_txq[i].offset;
		count = dev->tc_to_txq[i].count;

		queue_mask |= GENMASK(offset + count - 1, offset);
	}

	return queue_mask;
}

static void taprio_sched_to_offload(struct net_device *dev,
				    struct sched_gate_list *sched,
				    struct tc_taprio_qopt_offload *offload)
{
	struct sched_entry *entry;
	int i = 0;

	offload->base_time = sched->base_time;
	offload->cycle_time = sched->cycle_time;
	offload->cycle_time_extension = sched->cycle_time_extension;

	list_for_each_entry(entry, &sched->entries, list) {
		struct tc_taprio_sched_entry *e = &offload->entries[i];

		e->command = entry->command;
		e->interval = entry->interval;
		e->gate_mask = tc_map_to_queue_mask(dev, entry->gate_mask);

		i++;
	}

	offload->num_entries = i;
}

static int taprio_enable_offload(struct net_device *dev,
				 struct taprio_sched *q,
				 struct sched_gate_list *sched,
				 struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err = 0;

	if (!ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack,
			       "Device does not support taprio offload");
		return -EOPNOTSUPP;
	}

	offload = taprio_offload_alloc(sched->num_entries);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory for enabling offload mode");
		return -ENOMEM;
	}
	offload->enable = 1;
	taprio_sched_to_offload(dev, sched, offload);

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to setup taprio offload");
		goto done;
	}

	q->offloaded = true;

done:
	taprio_offload_free(offload);

	return err;
}

static int taprio_disable_offload(struct net_device *dev,
				  struct taprio_sched *q,
				  struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err;

	if (!q->offloaded)
		return 0;

	offload = taprio_offload_alloc(0);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory to disable offload mode");
		return -ENOMEM;
	}
	offload->enable = 0;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to disable offload");
		goto out;
	}

	q->offloaded = false;

out:
	taprio_offload_free(offload);

	return err;
}

/* If full offload is enabled, the only possible clockid is the net device's
 * PHC. For that reason, specifying a clockid through netlink is incorrect.
 * For txtime-assist, it is implicitly assumed that the device's PHC is kept
 * in sync with the specified clockid via a user space daemon such as phc2sys.
 * For both software taprio and txtime-assist, the clockid is used for the
 * hrtimer that advances the schedule and is hence mandatory.
 */
static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err = -EINVAL;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		const struct ethtool_ops *ops = dev->ethtool_ops;
		struct ethtool_ts_info info = {
			.cmd = ETHTOOL_GET_TS_INFO,
			.phc_index = -1,
		};

		if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
			NL_SET_ERR_MSG(extack,
				       "The 'clockid' cannot be specified for full offload");
			goto out;
		}

		if (ops && ops->get_ts_info)
			err = ops->get_ts_info(dev, &info);

		if (err || info.phc_index < 0) {
			NL_SET_ERR_MSG(extack,
				       "Device does not have a PTP clock");
			err = -ENOTSUPP;
			goto out;
		}
	} else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
		enum tk_offsets tk_offset;

		/* We only support static clockids and we don't allow
		 * for it to be modified after the first init.
		 */
		if (clockid < 0 ||
		    (q->clockid != -1 && q->clockid != clockid)) {
			NL_SET_ERR_MSG(extack,
				       "Changing the 'clockid' of a running schedule is not supported");
			err = -ENOTSUPP;
			goto out;
		}

		switch (clockid) {
		case CLOCK_REALTIME:
			tk_offset = TK_OFFS_REAL;
			break;
		case CLOCK_MONOTONIC:
			tk_offset = TK_OFFS_MAX;
			break;
		case CLOCK_BOOTTIME:
			tk_offset = TK_OFFS_BOOT;
			break;
		case CLOCK_TAI:
			tk_offset = TK_OFFS_TAI;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
			err = -EINVAL;
			goto out;
		}
		/* This pairs with READ_ONCE() in taprio_mono_to_any */
		WRITE_ONCE(q->tk_offset, tk_offset);

		q->clockid = clockid;
	} else {
		NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
		goto out;
	}

	/* Everything went ok, return success. */
	err = 0;

out:
	return err;
}

static int taprio_mqprio_cmp(const struct net_device *dev,
			     const struct tc_mqprio_qopt *mqprio)
{
	int i;

	if (!mqprio || mqprio->num_tc != dev->num_tc)
		return -1;

	for (i = 0; i < mqprio->num_tc; i++)
		if (dev->tc_to_txq[i].count != mqprio->count[i] ||
		    dev->tc_to_txq[i].offset != mqprio->offset[i])
			return -1;

	for (i = 0; i <= TC_BITMASK; i++)
		if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
			return -1;

	return 0;
}

/* The semantics of the 'flags' argument in relation to 'change()'
 * requests are interpreted following two rules (which are applied in
 * this order): (1) an omitted 'flags' argument is interpreted as
 * zero; (2) the 'flags' of a "running" taprio instance cannot be
 * changed.
 */
static int taprio_new_flags(const struct nlattr *attr, u32 old,
			    struct netlink_ext_ack *extack)
{
	u32 new = 0;

	if (attr)
		new = nla_get_u32(attr);

	if (old != TAPRIO_FLAGS_INVALID && old != new) {
		NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
		return -EOPNOTSUPP;
	}

	if (!taprio_flags_valid(new)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
		return -EINVAL;
	}

	return new;
}

static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
	struct sched_gate_list *oper, *admin, *new_admin;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mqprio_qopt *mqprio = NULL;
	unsigned long flags;
	ktime_t start;
	int i, err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
					  taprio_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
		mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);

	err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
			       q->flags, extack);
	if (err < 0)
		return err;

	q->flags = err;

	err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
	if (err < 0)
		return err;

	new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
	if (!new_admin) {
		NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&new_admin->entries);

	oper = rtnl_dereference(q->oper_sched);
	admin = rtnl_dereference(q->admin_sched);

	/* no changes - no new mqprio settings */
	if (!taprio_mqprio_cmp(dev, mqprio))
		mqprio = NULL;

	if (mqprio && (oper || admin)) {
		NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
		err = -ENOTSUPP;
		goto free_sched;
	}

	err = parse_taprio_schedule(q, tb, new_admin, extack);
	if (err < 0)
		goto free_sched;

	if (new_admin->num_entries == 0) {
		NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
		err = -EINVAL;
		goto free_sched;
	}

	err = taprio_parse_clockid(sch, tb, extack);
	if (err < 0)
		goto free_sched;

	taprio_set_picos_per_byte(dev, q);

	if (mqprio) {
		err = netdev_set_num_tc(dev, mqprio->num_tc);
		if (err)
			goto free_sched;
		for (i = 0; i < mqprio->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    mqprio->count[i],
					    mqprio->offset[i]);

		/* Always use supplied priority mappings */
		for (i = 0; i <= TC_BITMASK; i++)
			netdev_set_prio_tc_map(dev, i,
					       mqprio->prio_tc_map[i]);
	}

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		err = taprio_enable_offload(dev, q, new_admin, extack);
	else
		err = taprio_disable_offload(dev, q, extack);
	if (err)
		goto free_sched;

	/* Protects against enqueue()/dequeue() */
	spin_lock_bh(qdisc_lock(sch));

	if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
		if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
			err = -EINVAL;
			goto unlock;
		}

		q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
	}

	if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
	    !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    !hrtimer_active(&q->advance_timer)) {
		hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
		q->advance_timer.function = advance_sched;
	}

	err = taprio_get_start_time(sch, new_admin, &start);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
		goto unlock;
	}

	setup_txtime(q, new_admin, start);

	if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		if (!oper) {
			rcu_assign_pointer(q->oper_sched, new_admin);
			err = 0;
			new_admin = NULL;
			goto unlock;
		}

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);
	} else {
		setup_first_close_time(q, new_admin, start);

		/* Protects against advance_sched() */
		spin_lock_irqsave(&q->current_entry_lock, flags);

		taprio_start_sched(sch, start, new_admin);

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);

		spin_unlock_irqrestore(&q->current_entry_lock, flags);

		if (FULL_OFFLOAD_IS_ENABLED(q->flags))
			taprio_offload_config_changed(q);
	}

	new_admin = NULL;
	err = 0;

unlock:
	spin_unlock_bh(qdisc_lock(sch));

free_sched:
	if (new_admin)
		call_rcu(&new_admin->rcu, taprio_free_sched_cb);

	return err;
}

static void taprio_reset(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i;

	hrtimer_cancel(&q->advance_timer);
	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues; i++)
			if (q->qdiscs[i])
				qdisc_reset(q->qdiscs[i]);
	}
}

static void taprio_destroy(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *oper, *admin;
	unsigned int i;

	list_del(&q->taprio_list);

	/* Note that taprio_reset() might not be called if an error
	 * happens in qdisc_create(), after taprio_init() has been called.
	 */
	hrtimer_cancel(&q->advance_timer);

	taprio_disable_offload(dev, q, NULL);

	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues; i++)
			qdisc_put(q->qdiscs[i]);

		kfree(q->qdiscs);
	}
	q->qdiscs = NULL;

	netdev_reset_tc(dev);

	oper = rtnl_dereference(q->oper_sched);
	admin = rtnl_dereference(q->admin_sched);

	if (oper)
		call_rcu(&oper->rcu, taprio_free_sched_cb);

	if (admin)
		call_rcu(&admin->rcu, taprio_free_sched_cb);
}

static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i;

	spin_lock_init(&q->current_entry_lock);

	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
	q->advance_timer.function = advance_sched;

	q->root = sch;

	/* We only support static clockids. Use an invalid value as default
	 * and get the valid one on taprio_change().
	 */
	q->clockid = -1;
	q->flags = TAPRIO_FLAGS_INVALID;

	list_add(&q->taprio_list, &taprio_list);

	if (sch->parent != TC_H_ROOT) {
		NL_SET_ERR_MSG_MOD(extack, "Can only be attached as root qdisc");
		return -EOPNOTSUPP;
	}

	if (!netif_is_multiqueue(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multi-queue device is required");
		return -EOPNOTSUPP;
	}

	/* pre-allocate qdisc, attachment can't fail */
	q->qdiscs = kcalloc(dev->num_tx_queues,
			    sizeof(q->qdiscs[0]),
			    GFP_KERNEL);

	if (!q->qdiscs)
		return -ENOMEM;

	if (!opt)
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		struct Qdisc *qdisc;

		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;

		if (i < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);

		q->qdiscs[i] = qdisc;
	}

	return taprio_change(sch, opt, extack);
}

static void taprio_attach(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		struct Qdisc *qdisc = q->qdiscs[ntx];
		struct Qdisc *old;

		if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
			qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
			old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		} else {
			old = dev_graft_qdisc(qdisc->dev_queue, sch);
			qdisc_refcount_inc(sch);
		}
		if (old)
			qdisc_put(old);
	}

	/* access to the child qdiscs is not needed in offload mode */
	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		kfree(q->qdiscs);
		q->qdiscs = NULL;
	}
}

static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return netdev_get_tx_queue(dev, ntx);
}

static int taprio_graft(struct Qdisc *sch, unsigned long cl,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		*old = dev_graft_qdisc(dev_queue, new);
	} else {
		*old = q->qdiscs[cl - 1];
		q->qdiscs[cl - 1] = new;
	}

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

static int dump_entry(struct sk_buff *msg,
		      const struct sched_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
			entry->gate_mask))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
			entry->interval))
		goto nla_put_failure;

	return nla_nest_end(msg, item);

nla_put_failure:
	nla_nest_cancel(msg, item);
	return -1;
}

static int dump_schedule(struct sk_buff *msg,
			 const struct sched_gate_list *root)
{
	struct nlattr *entry_list;
	struct sched_entry *entry;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
			root->base_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
			root->cycle_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
			root->cycle_time_extension, TCA_TAPRIO_PAD))
		return -1;

	entry_list = nla_nest_start_noflag(msg,
					   TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
	if (!entry_list)
		goto error_nest;

	list_for_each_entry(entry, &root->entries, list) {
		if (dump_entry(msg, entry) < 0)
			goto error_nest;
	}

	nla_nest_end(msg, entry_list);
	return 0;

error_nest:
	nla_nest_cancel(msg, entry_list);
	return -1;
}

static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *oper, *admin;
	struct tc_mqprio_qopt opt = { 0 };
	struct nlattr *nest, *sched_nest;
	unsigned int i;

	oper = rtnl_dereference(q->oper_sched);
	admin = rtnl_dereference(q->admin_sched);

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto start_error;

	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
		goto options_error;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
		goto options_error;

	if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
		goto options_error;

	if (q->txtime_delay &&
	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
		goto options_error;

	if (oper && dump_schedule(skb, oper))
		goto options_error;

	if (!admin)
		goto done;

	sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
	if (!sched_nest)
		goto options_error;

	if (dump_schedule(skb, admin))
		goto admin_error;

	nla_nest_end(skb, sched_nest);

done:
	return nla_nest_end(skb, nest);

admin_error:
	nla_nest_cancel(skb, sched_nest);

options_error:
	nla_nest_cancel(skb, nest);

start_error:
	return -ENOSPC;
}

static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return q->qdiscs[ntx];
}

static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!taprio_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;

	return 0;
}

static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 ||
	    qdisc_qstats_copy(d, sch) < 0)
		return -1;
	return 0;
}

static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (!tc_qdisc_stats_dump(sch, ntx + 1, arg))
			break;
	}
}

static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops taprio_class_ops = {
	.graft		= taprio_graft,
	.leaf		= taprio_leaf,
	.find		= taprio_find,
	.walk		= taprio_walk,
	.dump		= taprio_dump_class,
	.dump_stats	= taprio_dump_class_stats,
	.select_queue	= taprio_select_queue,
};

static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
	.cl_ops		= &taprio_class_ops,
	.id		= "taprio",
	.priv_size	= sizeof(struct taprio_sched),
	.init		= taprio_init,
	.change		= taprio_change,
	.destroy	= taprio_destroy,
	.reset		= taprio_reset,
	.attach		= taprio_attach,
	.peek		= taprio_peek,
	.dequeue	= taprio_dequeue,
	.enqueue	= taprio_enqueue,
	.dump		= taprio_dump,
	.owner		= THIS_MODULE,
};

static struct notifier_block taprio_device_notifier = {
	.notifier_call = taprio_dev_notifier,
};

static int __init taprio_module_init(void)
{
	int err = register_netdevice_notifier(&taprio_device_notifier);

	if (err)
		return err;

	return register_qdisc(&taprio_qdisc_ops);
}

static void __exit taprio_module_exit(void)
{
	unregister_qdisc(&taprio_qdisc_ops);
	unregister_netdevice_notifier(&taprio_device_notifier);
}

module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");