// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 *
 */

#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/time.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/tcp.h>

static LIST_HEAD(taprio_list);

#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX
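/* The TXTIME_ASSIST / FULL_OFFLOAD flags above correspond to "flags 0x1"
 * and "flags 0x2" in tc-taprio(8). An illustrative configuration (all
 * values are examples only):
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *           num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *           queues 1@0 1@1 2@2 \
 *           base-time 1528743495910289987 \
 *           sched-entry S 01 300000 \
 *           sched-entry S 02 300000 \
 *           sched-entry S 04 400000 \
 *           flags 0x1 txtime-delay 500000 \
 *           clockid CLOCK_TAI
 */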
struct sched_entry {
        struct list_head list;

        /* The instant that this entry "closes" and the next one
         * should open; the qdisc will make some effort so that no
         * packet leaves after this time.
         */
        ktime_t close_time;
        ktime_t next_txtime;
        atomic_t budget;
        int index;
        u32 gate_mask;
        u32 interval;
        u8 command;
};

struct sched_gate_list {
        struct rcu_head rcu;
        struct list_head entries;
        size_t num_entries;
        ktime_t cycle_close_time;
        s64 cycle_time;
        s64 cycle_time_extension;
        s64 base_time;
};

struct taprio_sched {
        struct Qdisc **qdiscs;
        struct Qdisc *root;
        u32 flags;
        enum tk_offsets tk_offset;
        int clockid;
        bool offloaded;
        atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
                                    * speeds it's sub-nanoseconds per byte
                                    */

        /* Protects the update side of the RCU protected current_entry */
        spinlock_t current_entry_lock;
        struct sched_entry __rcu *current_entry;
        struct sched_gate_list __rcu *oper_sched;
        struct sched_gate_list __rcu *admin_sched;
        struct hrtimer advance_timer;
        struct list_head taprio_list;
        u32 max_frm_len[TC_MAX_QUEUE]; /* for the fast path */
        u32 max_sdu[TC_MAX_QUEUE]; /* for dump and offloading */
        u32 txtime_delay;
};

struct __tc_taprio_qopt_offload {
        refcount_t users;
        struct tc_taprio_qopt_offload offload;
};

static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
        if (!sched)
                return KTIME_MAX;

        return ns_to_ktime(sched->base_time);
}

static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)
{
        /* This pairs with WRITE_ONCE() in taprio_parse_clockid() */
        enum tk_offsets tk_offset = READ_ONCE(q->tk_offset);

        switch (tk_offset) {
        case TK_OFFS_MAX:
                return mono;
        default:
                return ktime_mono_to_any(mono, tk_offset);
        }
}
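/* TK_OFFS_MAX above doubles as a "no conversion" marker: it is what
 * taprio_parse_clockid() (outside this excerpt) stores for
 * CLOCK_MONOTONIC, so schedule times are then compared directly
 * against ktime_get().
 */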
static ktime_t taprio_get_time(const struct taprio_sched *q)
{
        return taprio_mono_to_any(q, ktime_get());
}

static void taprio_free_sched_cb(struct rcu_head *head)
{
        struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
        struct sched_entry *entry, *n;

        list_for_each_entry_safe(entry, n, &sched->entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        kfree(sched);
}
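/* Make the admin schedule the new operational one: publish it through
 * the RCU-protected pointers and free the previous oper schedule after
 * a grace period. The caller's local pointers are updated to match.
 */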
static void switch_schedules(struct taprio_sched *q,
                             struct sched_gate_list **admin,
                             struct sched_gate_list **oper)
{
        rcu_assign_pointer(q->oper_sched, *admin);
        rcu_assign_pointer(q->admin_sched, NULL);

        if (*oper)
                call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

        *oper = *admin;
        *admin = NULL;
}

/* Get how much time has already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
        ktime_t time_since_sched_start;
        s32 time_elapsed;

        time_since_sched_start = ktime_sub(time, sched->base_time);
        div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

        return time_elapsed;
}

static ktime_t get_interval_end_time(struct sched_gate_list *sched,
                                     struct sched_gate_list *admin,
                                     struct sched_entry *entry,
                                     ktime_t intv_start)
{
        s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
        ktime_t intv_end, cycle_ext_end, cycle_end;

        cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
        intv_end = ktime_add_ns(intv_start, entry->interval);
        cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

        if (ktime_before(intv_end, cycle_end))
                return intv_end;
        else if (admin && admin != sched &&
                 ktime_after(admin->base_time, cycle_end) &&
                 ktime_before(admin->base_time, cycle_ext_end))
                return admin->base_time;
        else
                return cycle_end;
}

static int length_to_duration(struct taprio_sched *q, int len)
{
        return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC);
}
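/* Worked example for the conversion above: at 10Gb/s one byte takes
 * 8 bits / (10 * 10^9 b/s) = 0.8ns, i.e. picos_per_byte == 800, so a
 * 1500 byte frame yields 1500 * 800 / PSEC_PER_NSEC = 1200ns.
 */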
/* Returns the entry corresponding to the next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
                                                  struct Qdisc *sch,
                                                  struct sched_gate_list *sched,
                                                  struct sched_gate_list *admin,
                                                  ktime_t time,
                                                  ktime_t *interval_start,
                                                  ktime_t *interval_end,
                                                  bool validate_interval)
{
        ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
        ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
        struct sched_entry *entry = NULL, *entry_found = NULL;
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        bool entry_available = false;
        s32 cycle_elapsed;
        int tc, n;

        tc = netdev_get_prio_tc_map(dev, skb->priority);
        packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

        *interval_start = 0;
        *interval_end = 0;

        if (!sched)
                return NULL;

        cycle = sched->cycle_time;
        cycle_elapsed = get_cycle_time_elapsed(sched, time);
        curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
        cycle_end = ktime_add_ns(curr_intv_end, cycle);

        list_for_each_entry(entry, &sched->entries, list) {
                curr_intv_start = curr_intv_end;
                curr_intv_end = get_interval_end_time(sched, admin, entry,
                                                      curr_intv_start);

                if (ktime_after(curr_intv_start, cycle_end))
                        break;

                if (!(entry->gate_mask & BIT(tc)) ||
                    packet_transmit_time > entry->interval)
                        continue;

                txtime = entry->next_txtime;

                if (ktime_before(txtime, time) || validate_interval) {
                        transmit_end_time = ktime_add_ns(time, packet_transmit_time);
                        if ((ktime_before(curr_intv_start, time) &&
                             ktime_before(transmit_end_time, curr_intv_end)) ||
                            (ktime_after(curr_intv_start, time) && !validate_interval)) {
                                entry_found = entry;
                                *interval_start = curr_intv_start;
                                *interval_end = curr_intv_end;
                                break;
                        } else if (!entry_available && !validate_interval) {
                                /* Here, we are just trying to find out the
                                 * first available interval in the next cycle.
                                 */
                                entry_available = true;
                                entry_found = entry;
                                *interval_start = ktime_add_ns(curr_intv_start, cycle);
                                *interval_end = ktime_add_ns(curr_intv_end, cycle);
                        }
                } else if (ktime_before(txtime, earliest_txtime) &&
                           !entry_available) {
                        earliest_txtime = txtime;
                        entry_found = entry;
                        n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
                        *interval_start = ktime_add(curr_intv_start, n * cycle);
                        *interval_end = ktime_add(curr_intv_end, n * cycle);
                }
        }

        return entry_found;
}

static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct sched_gate_list *sched, *admin;
        ktime_t interval_start, interval_end;
        struct sched_entry *entry;

        rcu_read_lock();
        sched = rcu_dereference(q->oper_sched);
        admin = rcu_dereference(q->admin_sched);

        entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
                                       &interval_start, &interval_end, true);
        rcu_read_unlock();

        return entry;
}

static bool taprio_flags_valid(u32 flags)
{
        /* Make sure no other flag bits are set. */
        if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
                      TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
                return false;
        /* txtime-assist and full offload are mutually exclusive */
        if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
            (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
                return false;
        return true;
}

/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
        unsigned int offset = skb_network_offset(skb);
        const struct ipv6hdr *ipv6h;
        const struct iphdr *iph;
        struct ipv6hdr _ipv6h;

        ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
        if (!ipv6h)
                return 0;

        if (ipv6h->version == 4) {
                iph = (struct iphdr *)ipv6h;
                offset += iph->ihl * 4;

                /* special-case 6in4 tunnelling, as that is a common way to get
                 * v6 connectivity in the home
                 */
                if (iph->protocol == IPPROTO_IPV6) {
                        ipv6h = skb_header_pointer(skb, offset,
                                                   sizeof(_ipv6h), &_ipv6h);

                        if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
                                return 0;
                } else if (iph->protocol != IPPROTO_TCP) {
                        return 0;
                }
        } else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
                return 0;
        }

        return taprio_mono_to_any(q, skb->skb_mstamp_ns);
}
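/* skb->skb_mstamp_ns above carries TCP's intended departure time (the
 * pacing/EDT timestamp, on CLOCK_MONOTONIC); get_packet_txtime() below
 * uses it as a lower bound so that txtime assist never schedules a
 * segment earlier than TCP asked for.
 */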
/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and the packet can
 *       be transmitted before it closes: schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open later
 *       in the cycle, set the txtime of the packet to the interval start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
        ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
        struct taprio_sched *q = qdisc_priv(sch);
        struct sched_gate_list *sched, *admin;
        ktime_t minimum_time, now, txtime;
        int len, packet_transmit_time;
        struct sched_entry *entry;
        bool sched_changed;

        now = taprio_get_time(q);
        minimum_time = ktime_add_ns(now, q->txtime_delay);

        tcp_tstamp = get_tcp_tstamp(q, skb);
        minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

        rcu_read_lock();
        admin = rcu_dereference(q->admin_sched);
        sched = rcu_dereference(q->oper_sched);
        if (admin && ktime_after(minimum_time, admin->base_time))
                switch_schedules(q, &admin, &sched);

        /* Until the schedule starts, all the queues are open */
        if (!sched || ktime_before(minimum_time, sched->base_time)) {
                txtime = minimum_time;
                goto done;
        }

        len = qdisc_pkt_len(skb);
        packet_transmit_time = length_to_duration(q, len);

        do {
                sched_changed = false;

                entry = find_entry_to_transmit(skb, sch, sched, admin,
                                               minimum_time,
                                               &interval_start, &interval_end,
                                               false);
                if (!entry) {
                        txtime = 0;
                        goto done;
                }

                txtime = entry->next_txtime;
                txtime = max_t(ktime_t, txtime, minimum_time);
                txtime = max_t(ktime_t, txtime, interval_start);

                if (admin && admin != sched &&
                    ktime_after(txtime, admin->base_time)) {
                        sched = admin;
                        sched_changed = true;
                        continue;
                }

                transmit_end_time = ktime_add(txtime, packet_transmit_time);
                minimum_time = transmit_end_time;

                /* Update the txtime of the current entry to the next time its
                 * interval starts.
                 */
                if (ktime_after(transmit_end_time, interval_end))
                        entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
        } while (sched_changed || ktime_after(transmit_end_time, interval_end));

        entry->next_txtime = transmit_end_time;

done:
        rcu_read_unlock();
        return txtime;
}
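/* The loop above retries in exactly two situations: the chosen txtime
 * lands after the admin schedule's base_time (sched_changed, so the
 * lookup is redone against the admin schedule), or the frame would not
 * finish before interval_end (next_txtime was just pushed one full
 * cycle ahead, so the next iteration picks a later interval).
 */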
static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
                              struct Qdisc *child, struct sk_buff **to_free)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        int prio = skb->priority;
        u8 tc;

        /* sk_flags are only safe to use on full sockets. */
        if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
                if (!is_valid_interval(skb, sch))
                        return qdisc_drop(skb, sch, to_free);
        } else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
                skb->tstamp = get_packet_txtime(skb, sch);
                if (!skb->tstamp)
                        return qdisc_drop(skb, sch, to_free);
        }

        /* Devices with full offload are expected to honor this in hardware */
        tc = netdev_get_prio_tc_map(dev, prio);
        if (skb->len > q->max_frm_len[tc])
                return qdisc_drop(skb, sch, to_free);

        qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;

        return qdisc_enqueue(skb, child, to_free);
}

/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                          struct sk_buff **to_free)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct Qdisc *child;
        int queue;

        queue = skb_get_queue_mapping(skb);

        child = q->qdiscs[queue];
        if (unlikely(!child))
                return qdisc_drop(skb, sch, to_free);

        /* Large packets might not be transmitted when the transmission duration
         * exceeds any configured interval. Therefore, segment the skb into
         * smaller chunks. Drivers with full offload are expected to handle
         * this in hardware.
         */
        if (skb_is_gso(skb)) {
                unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
                netdev_features_t features = netif_skb_features(skb);
                struct sk_buff *segs, *nskb;
                int ret;

                segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
                if (IS_ERR_OR_NULL(segs))
                        return qdisc_drop(skb, sch, to_free);

                skb_list_walk_safe(segs, segs, nskb) {
                        skb_mark_not_on_list(segs);
                        qdisc_skb_cb(segs)->pkt_len = segs->len;
                        slen += segs->len;

                        ret = taprio_enqueue_one(segs, sch, child, to_free);
                        if (ret != NET_XMIT_SUCCESS) {
                                if (net_xmit_drop_count(ret))
                                        qdisc_qstats_drop(sch);
                        } else {
                                numsegs++;
                        }
                }

                if (numsegs > 1)
                        qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
                consume_skb(skb);

                return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
        }

        return taprio_enqueue_one(skb, sch, child, to_free);
}

/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct sched_entry *entry;
        struct sk_buff *skb;
        u32 gate_mask;
        int i;

        rcu_read_lock();
        entry = rcu_dereference(q->current_entry);
        gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
        rcu_read_unlock();

        if (!gate_mask)
                return NULL;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct Qdisc *child = q->qdiscs[i];
                int prio;
                u8 tc;

                if (unlikely(!child))
                        continue;

                skb = child->ops->peek(child);
                if (!skb)
                        continue;

                if (TXTIME_ASSIST_IS_ENABLED(q->flags))
                        return skb;

                prio = skb->priority;
                tc = netdev_get_prio_tc_map(dev, prio);

                if (!(gate_mask & BIT(tc)))
                        continue;

                return skb;
        }

        return NULL;
}
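/* The budget is the byte count that fits in the entry's interval:
 * the interval is in nanoseconds, so it is scaled to picoseconds
 * before dividing by picos_per_byte.
 */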
static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
        atomic_set(&entry->budget,
                   div64_u64((u64)entry->interval * PSEC_PER_NSEC,
                             atomic64_read(&q->picos_per_byte)));
}

/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct sk_buff *skb = NULL;
        struct sched_entry *entry;
        u32 gate_mask;
        int i;

        rcu_read_lock();
        entry = rcu_dereference(q->current_entry);
        /* if there's no entry, it means that the schedule didn't
         * start yet, so force all gates to be open, this is in
         * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5
         * "AdminGateStates"
         */
        gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;

        if (!gate_mask)
                goto done;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct Qdisc *child = q->qdiscs[i];
                ktime_t guard;
                int prio;
                int len;
                u8 tc;

                if (unlikely(!child))
                        continue;

                if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
                        skb = child->ops->dequeue(child);
                        if (!skb)
                                continue;
                        goto skb_found;
                }

                skb = child->ops->peek(child);
                if (!skb)
                        continue;

                prio = skb->priority;
                tc = netdev_get_prio_tc_map(dev, prio);

                if (!(gate_mask & BIT(tc))) {
                        skb = NULL;
                        continue;
                }

                len = qdisc_pkt_len(skb);
                guard = ktime_add_ns(taprio_get_time(q),
                                     length_to_duration(q, len));

                /* In the case that there's no gate entry, there's no
                 * guard band ...
                 */
                if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
                    ktime_after(guard, entry->close_time)) {
                        skb = NULL;
                        continue;
                }

                /* ... and no budget. */
                if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
                    atomic_sub_return(len, &entry->budget) < 0) {
                        skb = NULL;
                        continue;
                }

                skb = child->ops->dequeue(child);
                if (unlikely(!skb))
                        goto done;

skb_found:
                qdisc_bstats_update(sch, skb);
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;

                goto done;
        }

done:
        rcu_read_unlock();

        return skb;
}
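/* The current entry is the last one of the running cycle either when it
 * is the tail of the entry list or when the (possibly truncated) cycle
 * ends exactly at its close_time.
 */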
static bool should_restart_cycle(const struct sched_gate_list *oper,
                                 const struct sched_entry *entry)
{
        if (list_is_last(&entry->list, &oper->entries))
                return true;

        if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
                return true;

        return false;
}

static bool should_change_schedules(const struct sched_gate_list *admin,
                                    const struct sched_gate_list *oper,
                                    ktime_t close_time)
{
        ktime_t next_base_time, extension_time;

        if (!admin)
                return false;

        next_base_time = sched_base_time(admin);

        /* This is the simple case, the close_time would fall after
         * the next schedule base_time.
         */
        if (ktime_compare(next_base_time, close_time) <= 0)
                return true;

        /* This is the cycle_time_extension case, if the close_time
         * plus the amount that can be extended would fall after the
         * next schedule base_time, we can extend the current schedule
         * for that amount.
         */
        extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);

        /* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
         * how precisely the extension should be made. So after
         * conformance testing, this logic may change.
         */
        if (ktime_compare(next_base_time, extension_time) <= 0)
                return true;

        return false;
}
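/* hrtimer callback that advances the gate schedule: pick the next entry
 * (wrapping at cycle boundaries), switch to the admin schedule once its
 * base_time is reached, republish current_entry under
 * current_entry_lock and rearm the timer for the new close_time.
 */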
static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
        struct taprio_sched *q = container_of(timer, struct taprio_sched,
                                              advance_timer);
        struct sched_gate_list *oper, *admin;
        struct sched_entry *entry, *next;
        struct Qdisc *sch = q->root;
        ktime_t close_time;

        spin_lock(&q->current_entry_lock);
        entry = rcu_dereference_protected(q->current_entry,
                                          lockdep_is_held(&q->current_entry_lock));
        oper = rcu_dereference_protected(q->oper_sched,
                                         lockdep_is_held(&q->current_entry_lock));
        admin = rcu_dereference_protected(q->admin_sched,
                                          lockdep_is_held(&q->current_entry_lock));

        if (!oper)
                switch_schedules(q, &admin, &oper);

        /* This can happen in two cases: 1. this is the very first run
         * of this function (i.e. we weren't running any schedule
         * previously); 2. The previous schedule just ended. The first
         * entry of each schedule is pre-calculated during the
         * schedule initialization.
         */
        if (unlikely(!entry || entry->close_time == oper->base_time)) {
                next = list_first_entry(&oper->entries, struct sched_entry,
                                        list);
                close_time = next->close_time;
                goto first_run;
        }

        if (should_restart_cycle(oper, entry)) {
                next = list_first_entry(&oper->entries, struct sched_entry,
                                        list);
                oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
                                                      oper->cycle_time);
        } else {
                next = list_next_entry(entry, list);
        }

        close_time = ktime_add_ns(entry->close_time, next->interval);
        close_time = min_t(ktime_t, close_time, oper->cycle_close_time);

        if (should_change_schedules(admin, oper, close_time)) {
                /* Set things so the next time this runs, the new
                 * schedule runs.
                 */
                close_time = sched_base_time(admin);
                switch_schedules(q, &admin, &oper);
        }

        next->close_time = close_time;
        taprio_set_budget(q, next);

first_run:
        rcu_assign_pointer(q->current_entry, next);
        spin_unlock(&q->current_entry_lock);

        hrtimer_set_expires(&q->advance_timer, close_time);

        rcu_read_lock();
        __netif_schedule(sch);
        rcu_read_unlock();

        return HRTIMER_RESTART;
}

static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
        [TCA_TAPRIO_SCHED_ENTRY_INDEX] = { .type = NLA_U32 },
        [TCA_TAPRIO_SCHED_ENTRY_CMD] = { .type = NLA_U8 },
        [TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
        [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] = { .type = NLA_U32 },
};

static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
        [TCA_TAPRIO_TC_ENTRY_INDEX] = { .type = NLA_U32 },
        [TCA_TAPRIO_TC_ENTRY_MAX_SDU] = { .type = NLA_U32 },
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
        [TCA_TAPRIO_ATTR_PRIOMAP] = {
                .len = sizeof(struct tc_mqprio_qopt)
        },
        [TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST] = { .type = NLA_NESTED },
        [TCA_TAPRIO_ATTR_SCHED_BASE_TIME] = { .type = NLA_S64 },
        [TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] = { .type = NLA_NESTED },
        [TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 },
        [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 },
        [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
        [TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 },
        [TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 },
        [TCA_TAPRIO_ATTR_TC_ENTRY] = { .type = NLA_NESTED },
};

static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
                            struct sched_entry *entry,
                            struct netlink_ext_ack *extack)
{
        int min_duration = length_to_duration(q, ETH_ZLEN);
        u32 interval = 0;

        if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
                entry->command = nla_get_u8(
                        tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

        if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
                entry->gate_mask = nla_get_u32(
                        tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

        if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
                interval = nla_get_u32(
                        tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

        /* The interval should allow at least the minimum ethernet
         * frame to go out.
         */
        if (interval < min_duration) {
                NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
                return -EINVAL;
        }

        entry->interval = interval;

        return 0;
}

static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
                             struct sched_entry *entry, int index,
                             struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
        int err;

        err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
                                          entry_policy, NULL);
        if (err < 0) {
                NL_SET_ERR_MSG(extack, "Could not parse nested entry");
                return -EINVAL;
        }

        entry->index = index;

        return fill_sched_entry(q, tb, entry, extack);
}
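/* parse_sched_list() below walks this netlink nesting (governed by the
 * policies above); note that TCA_TAPRIO_SCHED_ENTRY_INDEX is accepted
 * by the policy but the index actually used is positional:
 *
 *   TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST
 *     TCA_TAPRIO_SCHED_ENTRY
 *       TCA_TAPRIO_SCHED_ENTRY_CMD
 *       TCA_TAPRIO_SCHED_ENTRY_GATE_MASK
 *       TCA_TAPRIO_SCHED_ENTRY_INTERVAL
 */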
static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
                            struct sched_gate_list *sched,
                            struct netlink_ext_ack *extack)
{
        struct nlattr *n;
        int err, rem;
        int i = 0;

        if (!list)
                return -EINVAL;

        nla_for_each_nested(n, list, rem) {
                struct sched_entry *entry;

                if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
                        NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
                        continue;
                }

                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry) {
                        NL_SET_ERR_MSG(extack, "Not enough memory for entry");
                        return -ENOMEM;
                }

                err = parse_sched_entry(q, n, entry, i, extack);
                if (err < 0) {
                        kfree(entry);
                        return err;
                }

                list_add_tail(&entry->list, &sched->entries);
                i++;
        }

        sched->num_entries = i;

        return i;
}

static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
                                 struct sched_gate_list *new,
                                 struct netlink_ext_ack *extack)
{
        int err = 0;

        if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
                NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
                return -ENOTSUPP;
        }

        if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
                new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

        if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
                new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

        if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
                new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

        if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
                err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
                                       new, extack);
        if (err < 0)
                return err;

        if (!new->cycle_time) {
                struct sched_entry *entry;
                ktime_t cycle = 0;

                list_for_each_entry(entry, &new->entries, list)
                        cycle = ktime_add_ns(cycle, entry->interval);

                if (!cycle) {
                        NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
                        return -EINVAL;
                }

                new->cycle_time = cycle;
        }

        return 0;
}
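/* Validate the mqprio portion of the configuration: the number of
 * traffic classes, the priority to TC map and the TC to TX queue
 * mapping. Note that the queue-range overlap check is skipped when
 * txtime assist is enabled.
 */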
static int taprio_parse_mqprio_opt(struct net_device *dev,
                                   struct tc_mqprio_qopt *qopt,
                                   struct netlink_ext_ack *extack,
                                   u32 taprio_flags)
{
        int i, j;

        if (!qopt && !dev->num_tc) {
                NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
                return -EINVAL;
        }

        /* If num_tc is already set, it means that the user already
         * configured the mqprio part
         */
        if (dev->num_tc)
                return 0;

        /* Verify num_tc is not out of max range */
        if (qopt->num_tc > TC_MAX_QUEUE) {
                NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
                return -EINVAL;
        }

        /* taprio imposes that traffic classes map 1:n to tx queues */
        if (qopt->num_tc > dev->num_tx_queues) {
                NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
                return -EINVAL;
        }

        /* Verify priority mapping uses valid tcs */
        for (i = 0; i <= TC_BITMASK; i++) {
                if (qopt->prio_tc_map[i] >= qopt->num_tc) {
                        NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
                        return -EINVAL;
                }
        }

        for (i = 0; i < qopt->num_tc; i++) {
                unsigned int last = qopt->offset[i] + qopt->count[i];

                /* Verify the queue count is within the TX queue range;
                 * 'last' being equal to real_num_tx_queues indicates
                 * the last queue is in use.
                 */
                if (qopt->offset[i] >= dev->num_tx_queues ||
                    !qopt->count[i] ||
                    last > dev->real_num_tx_queues) {
                        NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
                        return -EINVAL;
                }

                if (TXTIME_ASSIST_IS_ENABLED(taprio_flags))
                        continue;

                /* Verify that the offset and counts do not overlap */
                for (j = i + 1; j < qopt->num_tc; j++) {
                        if (last > qopt->offset[j]) {
                                NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
                                return -EINVAL;
                        }
                }
        }

        return 0;
}
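/* Compute when the given schedule should take effect: base_time itself
 * if it is still in the future, otherwise the start of the next cycle,
 * i.e. base + (floor((now - base) / cycle) + 1) * cycle.
 */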
static int taprio_get_start_time(struct Qdisc *sch,
                                 struct sched_gate_list *sched,
                                 ktime_t *start)
{
        struct taprio_sched *q = qdisc_priv(sch);
        ktime_t now, base, cycle;
        s64 n;

        base = sched_base_time(sched);
        now = taprio_get_time(q);

        if (ktime_after(base, now)) {
                *start = base;
                return 0;
        }

        cycle = sched->cycle_time;

        /* The qdisc is expected to have at least one sched_entry. Moreover,
         * any entry must have 'interval' > 0. Thus if the cycle time is zero,
         * something went really wrong. In that case, we should warn about this
         * inconsistent state and return an error.
         */
        if (WARN_ON(!cycle))
                return -EFAULT;

        /* Schedule the start time for the beginning of the next
         * cycle.
         */
        n = div64_s64(ktime_sub_ns(now, base), cycle);
        *start = ktime_add_ns(base, (n + 1) * cycle);
        return 0;
}

static void setup_first_close_time(struct taprio_sched *q,
                                   struct sched_gate_list *sched, ktime_t base)
{
        struct sched_entry *first;
        ktime_t cycle;

        first = list_first_entry(&sched->entries,
                                 struct sched_entry, list);

        cycle = sched->cycle_time;

        /* FIXME: find a better place to do this */
        sched->cycle_close_time = ktime_add_ns(base, cycle);

        first->close_time = ktime_add_ns(base, first->interval);
        taprio_set_budget(q, first);
        rcu_assign_pointer(q->current_entry, NULL);
}

static void taprio_start_sched(struct Qdisc *sch,
                               ktime_t start, struct sched_gate_list *new)
{
        struct taprio_sched *q = qdisc_priv(sch);
        ktime_t expires;

        if (FULL_OFFLOAD_IS_ENABLED(q->flags))
                return;

        expires = hrtimer_get_expires(&q->advance_timer);
        if (expires == 0)
                expires = KTIME_MAX;

        /* If the new schedule starts before the next expiration, we
         * reprogram it to the earliest one, so we change the admin
         * schedule to the operational one at the right time.
         */
        start = min_t(ktime_t, start, expires);

        hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}
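/* Cache the link-speed-dependent byte cost. ecmd.base.speed is in Mb/s,
 * so e.g. SPEED_1000 yields (USEC_PER_SEC * 8) / 1000 = 8000 picoseconds
 * per byte (8ns per byte); SPEED_10 is assumed when the link speed
 * cannot be read.
 */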
1057a3d43c0dSVinicius Costa Gomes */
1058a3d43c0dSVinicius Costa Gomes start = min_t(ktime_t, start, expires);
10595a781ccbSVinicius Costa Gomes
10605a781ccbSVinicius Costa Gomes hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
10615a781ccbSVinicius Costa Gomes }
10625a781ccbSVinicius Costa Gomes
10637b9eba7bSLeandro Dorileo static void taprio_set_picos_per_byte(struct net_device *dev,
10647b9eba7bSLeandro Dorileo struct taprio_sched *q)
10657b9eba7bSLeandro Dorileo {
10667b9eba7bSLeandro Dorileo struct ethtool_link_ksettings ecmd;
1067f04b514cSVladimir Oltean int speed = SPEED_10;
1068f04b514cSVladimir Oltean int picos_per_byte;
1069f04b514cSVladimir Oltean int err;
10707b9eba7bSLeandro Dorileo
1071f04b514cSVladimir Oltean err = __ethtool_get_link_ksettings(dev, &ecmd);
1072f04b514cSVladimir Oltean if (err < 0)
1073f04b514cSVladimir Oltean goto skip;
1074f04b514cSVladimir Oltean
10759a9251a3SVladimir Oltean if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
1076f04b514cSVladimir Oltean speed = ecmd.base.speed;
1077f04b514cSVladimir Oltean
1078f04b514cSVladimir Oltean skip:
107968ce6688SVladimir Oltean picos_per_byte = (USEC_PER_SEC * 8) / speed;
10807b9eba7bSLeandro Dorileo
10817b9eba7bSLeandro Dorileo atomic64_set(&q->picos_per_byte, picos_per_byte);
10827b9eba7bSLeandro Dorileo netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
10837b9eba7bSLeandro Dorileo dev->name, (long long)atomic64_read(&q->picos_per_byte),
10847b9eba7bSLeandro Dorileo ecmd.base.speed);
10857b9eba7bSLeandro Dorileo }
10867b9eba7bSLeandro Dorileo
10877b9eba7bSLeandro Dorileo static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
10887b9eba7bSLeandro Dorileo void *ptr)
10897b9eba7bSLeandro Dorileo {
10907b9eba7bSLeandro Dorileo struct net_device *dev = netdev_notifier_info_to_dev(ptr);
10917b9eba7bSLeandro Dorileo struct taprio_sched *q;
10927b9eba7bSLeandro Dorileo
10937b9eba7bSLeandro Dorileo ASSERT_RTNL();
10947b9eba7bSLeandro Dorileo
10957b9eba7bSLeandro Dorileo if (event != NETDEV_UP && event != NETDEV_CHANGE)
10967b9eba7bSLeandro Dorileo return NOTIFY_DONE;
10977b9eba7bSLeandro Dorileo
10987b9eba7bSLeandro Dorileo list_for_each_entry(q, &taprio_list, taprio_list) {
1099fc4f2fd0SVladimir Oltean if (dev != qdisc_dev(q->root))
1100fc4f2fd0SVladimir Oltean continue;
1101fc4f2fd0SVladimir Oltean
1102fc4f2fd0SVladimir Oltean taprio_set_picos_per_byte(dev, q);
11037b9eba7bSLeandro Dorileo break;
11047b9eba7bSLeandro Dorileo }
11057b9eba7bSLeandro Dorileo
11067b9eba7bSLeandro Dorileo return NOTIFY_DONE;
11077b9eba7bSLeandro Dorileo }
11087b9eba7bSLeandro Dorileo
11094cfd5779SVedang Patel static void setup_txtime(struct taprio_sched *q,
11104cfd5779SVedang Patel struct sched_gate_list *sched, ktime_t base)
11114cfd5779SVedang Patel {
11124cfd5779SVedang Patel struct sched_entry *entry;
11134cfd5779SVedang Patel u32 interval = 0;
11144cfd5779SVedang Patel
11154cfd5779SVedang Patel list_for_each_entry(entry, &sched->entries, list) {
11164cfd5779SVedang Patel entry->next_txtime = ktime_add_ns(base, interval);
11174cfd5779SVedang Patel interval += entry->interval;
11184cfd5779SVedang Patel }
11194cfd5779SVedang Patel }
11204cfd5779SVedang Patel
11219c66d156SVinicius Costa Gomes static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
11229c66d156SVinicius Costa Gomes {
11239c66d156SVinicius Costa Gomes struct __tc_taprio_qopt_offload *__offload;
11249c66d156SVinicius Costa Gomes
112511a33de2SGustavo A. R. Silva __offload = kzalloc(struct_size(__offload, offload.entries, num_entries),
112611a33de2SGustavo A. R. Silva GFP_KERNEL);
11279c66d156SVinicius Costa Gomes if (!__offload)
11289c66d156SVinicius Costa Gomes return NULL;
11299c66d156SVinicius Costa Gomes
11309c66d156SVinicius Costa Gomes refcount_set(&__offload->users, 1);
11319c66d156SVinicius Costa Gomes
11329c66d156SVinicius Costa Gomes return &__offload->offload;
11339c66d156SVinicius Costa Gomes }
11349c66d156SVinicius Costa Gomes
11359c66d156SVinicius Costa Gomes struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
11369c66d156SVinicius Costa Gomes *offload)
11379c66d156SVinicius Costa Gomes {
11389c66d156SVinicius Costa Gomes struct __tc_taprio_qopt_offload *__offload;
11399c66d156SVinicius Costa Gomes
11409c66d156SVinicius Costa Gomes __offload = container_of(offload, struct __tc_taprio_qopt_offload,
11419c66d156SVinicius Costa Gomes offload);
11429c66d156SVinicius Costa Gomes
11439c66d156SVinicius Costa Gomes refcount_inc(&__offload->users);
11449c66d156SVinicius Costa Gomes
11459c66d156SVinicius Costa Gomes return offload;
11469c66d156SVinicius Costa Gomes }
11479c66d156SVinicius Costa Gomes EXPORT_SYMBOL_GPL(taprio_offload_get);
11489c66d156SVinicius Costa Gomes
11499c66d156SVinicius Costa Gomes void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
11509c66d156SVinicius Costa Gomes {
11519c66d156SVinicius Costa Gomes struct __tc_taprio_qopt_offload *__offload;
11529c66d156SVinicius Costa Gomes
11539c66d156SVinicius Costa Gomes __offload = container_of(offload, struct __tc_taprio_qopt_offload,
11549c66d156SVinicius Costa Gomes offload);
11559c66d156SVinicius Costa Gomes
11569c66d156SVinicius Costa Gomes if (!refcount_dec_and_test(&__offload->users))
11579c66d156SVinicius Costa Gomes return;
11589c66d156SVinicius Costa Gomes
11599c66d156SVinicius Costa Gomes kfree(__offload);
11609c66d156SVinicius Costa Gomes }
11619c66d156SVinicius Costa Gomes EXPORT_SYMBOL_GPL(taprio_offload_free);
11629c66d156SVinicius Costa Gomes
11639c66d156SVinicius Costa Gomes /* This function only serves to keep the pointers to the "oper" and "admin"
11649c66d156SVinicius Costa Gomes * schedules valid in relation to their base times, so when calling dump() the
11659c66d156SVinicius Costa Gomes * user looks at the right schedules.
11669c66d156SVinicius Costa Gomes * When using full offload, the admin configuration is promoted to oper at the
11679c66d156SVinicius Costa Gomes * base_time in the PHC time domain. But because the system time is not
11689c66d156SVinicius Costa Gomes * necessarily in sync with that, we can't just trigger a hrtimer to call
11699c66d156SVinicius Costa Gomes * switch_schedules at the right hardware time.
11709c66d156SVinicius Costa Gomes * At the moment we call this by hand right away from taprio, but in the future
11719c66d156SVinicius Costa Gomes * it will be useful to create a mechanism for drivers to notify taprio of the
11729c66d156SVinicius Costa Gomes * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
11739c66d156SVinicius Costa Gomes * This is left as TODO.
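 * As a concrete illustration (hypothetical numbers): if the PHC runs
 * 2s ahead of the system clock, a hrtimer armed on system time would
 * promote the admin schedule 2s after the hardware actually switched,
 * which is why the promotion is done by hand here instead.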
11749c66d156SVinicius Costa Gomes */
1175d665c128SYi Wang static void taprio_offload_config_changed(struct taprio_sched *q)
11769c66d156SVinicius Costa Gomes {
11779c66d156SVinicius Costa Gomes struct sched_gate_list *oper, *admin;
11789c66d156SVinicius Costa Gomes
1179c8cbe123SVladimir Oltean oper = rtnl_dereference(q->oper_sched);
1180c8cbe123SVladimir Oltean admin = rtnl_dereference(q->admin_sched);
11819c66d156SVinicius Costa Gomes
11829c66d156SVinicius Costa Gomes switch_schedules(q, &admin, &oper);
11839c66d156SVinicius Costa Gomes }
11849c66d156SVinicius Costa Gomes
118509e31cf0SVinicius Costa Gomes static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
118609e31cf0SVinicius Costa Gomes {
118709e31cf0SVinicius Costa Gomes u32 i, queue_mask = 0;
118809e31cf0SVinicius Costa Gomes
118909e31cf0SVinicius Costa Gomes for (i = 0; i < dev->num_tc; i++) {
119009e31cf0SVinicius Costa Gomes u32 offset, count;
119109e31cf0SVinicius Costa Gomes
119209e31cf0SVinicius Costa Gomes if (!(tc_mask & BIT(i)))
119309e31cf0SVinicius Costa Gomes continue;
119409e31cf0SVinicius Costa Gomes
119509e31cf0SVinicius Costa Gomes offset = dev->tc_to_txq[i].offset;
119609e31cf0SVinicius Costa Gomes count = dev->tc_to_txq[i].count;
119709e31cf0SVinicius Costa Gomes
119809e31cf0SVinicius Costa Gomes queue_mask |= GENMASK(offset + count - 1, offset);
119909e31cf0SVinicius Costa Gomes }
120009e31cf0SVinicius Costa Gomes
120109e31cf0SVinicius Costa Gomes return queue_mask;
120209e31cf0SVinicius Costa Gomes }
120309e31cf0SVinicius Costa Gomes
120409e31cf0SVinicius Costa Gomes static void taprio_sched_to_offload(struct net_device *dev,
12059c66d156SVinicius Costa Gomes struct sched_gate_list *sched,
12069c66d156SVinicius Costa Gomes struct tc_taprio_qopt_offload *offload)
12079c66d156SVinicius Costa Gomes {
12089c66d156SVinicius Costa Gomes struct sched_entry *entry;
12099c66d156SVinicius Costa Gomes int i = 0;
12109c66d156SVinicius Costa Gomes
12119c66d156SVinicius Costa Gomes offload->base_time = sched->base_time;
12129c66d156SVinicius Costa Gomes offload->cycle_time = sched->cycle_time;
12139c66d156SVinicius Costa Gomes offload->cycle_time_extension = sched->cycle_time_extension;
12149c66d156SVinicius Costa Gomes
12159c66d156SVinicius Costa Gomes list_for_each_entry(entry, &sched->entries, list) {
12169c66d156SVinicius Costa Gomes struct tc_taprio_sched_entry *e = &offload->entries[i];
12179c66d156SVinicius Costa Gomes
12189c66d156SVinicius Costa Gomes e->command = entry->command;
12199c66d156SVinicius Costa Gomes e->interval = entry->interval;
122009e31cf0SVinicius Costa Gomes e->gate_mask = tc_map_to_queue_mask(dev, entry->gate_mask);
122109e31cf0SVinicius Costa Gomes
12229c66d156SVinicius Costa Gomes i++;
12239c66d156SVinicius Costa Gomes }
12249c66d156SVinicius Costa Gomes
12259c66d156SVinicius Costa Gomes offload->num_entries = i;
12269c66d156SVinicius Costa Gomes }
12279c66d156SVinicius Costa Gomes
12289c66d156SVinicius Costa Gomes static int taprio_enable_offload(struct net_device *dev,
12299c66d156SVinicius Costa Gomes struct taprio_sched *q,
12309c66d156SVinicius Costa Gomes struct sched_gate_list *sched,
12319c66d156SVinicius Costa Gomes struct netlink_ext_ack *extack)
12329c66d156SVinicius Costa Gomes {
12339c66d156SVinicius Costa Gomes const struct net_device_ops *ops = dev->netdev_ops;
12349c66d156SVinicius Costa Gomes struct tc_taprio_qopt_offload *offload;
1235a54fc09eSVladimir Oltean struct tc_taprio_caps caps;
1236a54fc09eSVladimir Oltean int tc, err = 0;
12379c66d156SVinicius Costa Gomes
12389c66d156SVinicius Costa Gomes if (!ops->ndo_setup_tc) {
12399c66d156SVinicius Costa Gomes NL_SET_ERR_MSG(extack,
12409c66d156SVinicius Costa Gomes "Device does not support taprio offload");
12419c66d156SVinicius Costa Gomes return -EOPNOTSUPP;
12429c66d156SVinicius Costa Gomes }
12439c66d156SVinicius Costa Gomes
1244a54fc09eSVladimir Oltean qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
1245a54fc09eSVladimir Oltean &caps, sizeof(caps));
1246a54fc09eSVladimir Oltean
1247a54fc09eSVladimir Oltean if (!caps.supports_queue_max_sdu) {
1248a54fc09eSVladimir Oltean for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
1249a54fc09eSVladimir Oltean if (q->max_sdu[tc]) {
1250a54fc09eSVladimir Oltean NL_SET_ERR_MSG_MOD(extack,
1251a54fc09eSVladimir Oltean "Device does not handle queueMaxSDU");
1252a54fc09eSVladimir Oltean return -EOPNOTSUPP;
1253a54fc09eSVladimir Oltean }
1254a54fc09eSVladimir Oltean }
1255a54fc09eSVladimir Oltean }
1256a54fc09eSVladimir Oltean
12579c66d156SVinicius Costa Gomes offload = taprio_offload_alloc(sched->num_entries);
12589c66d156SVinicius Costa Gomes if (!offload) {
12599c66d156SVinicius Costa Gomes NL_SET_ERR_MSG(extack,
12609c66d156SVinicius Costa Gomes "Not enough memory for enabling offload mode");
12619c66d156SVinicius Costa Gomes return -ENOMEM;
12629c66d156SVinicius Costa Gomes }
12639c66d156SVinicius Costa Gomes offload->enable = 1;
126409e31cf0SVinicius Costa Gomes taprio_sched_to_offload(dev, sched, offload);
12659c66d156SVinicius Costa Gomes
1266a54fc09eSVladimir Oltean for (tc = 0; tc < TC_MAX_QUEUE; tc++)
1267a54fc09eSVladimir Oltean offload->max_sdu[tc] = q->max_sdu[tc];
1268a54fc09eSVladimir Oltean
12699c66d156SVinicius Costa Gomes err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
12709c66d156SVinicius Costa Gomes if (err < 0) {
12719c66d156SVinicius Costa Gomes NL_SET_ERR_MSG(extack,
12729c66d156SVinicius Costa Gomes "Device failed to setup taprio offload");
12739c66d156SVinicius Costa Gomes goto done;
12749c66d156SVinicius Costa Gomes }
12759c66d156SVinicius Costa Gomes
1276db46e3a8SVladimir Oltean q->offloaded = true;
1277db46e3a8SVladimir Oltean
12789c66d156SVinicius Costa Gomes done:
12799c66d156SVinicius Costa Gomes taprio_offload_free(offload);
12809c66d156SVinicius Costa Gomes
12819c66d156SVinicius Costa Gomes return err;
12829c66d156SVinicius Costa Gomes }
12839c66d156SVinicius Costa Gomes
12849c66d156SVinicius Costa Gomes static int taprio_disable_offload(struct net_device *dev,
12859c66d156SVinicius Costa Gomes struct taprio_sched *q,
12869c66d156SVinicius Costa Gomes struct netlink_ext_ack *extack)
12879c66d156SVinicius Costa Gomes {
12889c66d156SVinicius Costa Gomes const struct net_device_ops *ops = dev->netdev_ops;
12899c66d156SVinicius Costa Gomes struct tc_taprio_qopt_offload *offload;
12909c66d156SVinicius Costa Gomes int err;
12919c66d156SVinicius Costa Gomes
1292db46e3a8SVladimir Oltean if (!q->offloaded)
12939c66d156SVinicius Costa Gomes return 0;
12949c66d156SVinicius Costa Gomes
12959c66d156SVinicius Costa Gomes offload = taprio_offload_alloc(0);
12969c66d156SVinicius Costa Gomes if (!offload) {
12979c66d156SVinicius Costa Gomes NL_SET_ERR_MSG(extack,
12989c66d156SVinicius Costa Gomes "Not enough memory to disable offload mode");
12999c66d156SVinicius Costa Gomes return -ENOMEM;
13009c66d156SVinicius Costa Gomes }
13019c66d156SVinicius Costa Gomes offload->enable = 0;
13029c66d156SVinicius Costa Gomes
13039c66d156SVinicius Costa Gomes err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
13049c66d156SVinicius Costa Gomes if (err < 0) {
13059c66d156SVinicius Costa Gomes NL_SET_ERR_MSG(extack,
13069c66d156SVinicius Costa Gomes "Device failed to disable offload");
13079c66d156SVinicius Costa Gomes goto out;
13089c66d156SVinicius Costa Gomes }
13099c66d156SVinicius Costa Gomes
1310db46e3a8SVladimir Oltean q->offloaded = false;
1311db46e3a8SVladimir Oltean
13129c66d156SVinicius Costa Gomes out:
13139c66d156SVinicius Costa Gomes taprio_offload_free(offload);
13149c66d156SVinicius Costa Gomes
13159c66d156SVinicius Costa Gomes return err;
13169c66d156SVinicius Costa Gomes }
13179c66d156SVinicius Costa Gomes
13189c66d156SVinicius Costa Gomes /* If full offload is enabled, the only possible clockid is the net device's
13199c66d156SVinicius Costa Gomes * PHC. For that reason, specifying a clockid through netlink is incorrect.
13209c66d156SVinicius Costa Gomes * For txtime-assist, it is implicitly assumed that the device's PHC is kept
13219c66d156SVinicius Costa Gomes * in sync with the specified clockid via a user space daemon such as phc2sys.
13229c66d156SVinicius Costa Gomes * For both software taprio and txtime-assist, the clockid is used for the
13239c66d156SVinicius Costa Gomes * hrtimer that advances the schedule and is hence mandatory.
13249c66d156SVinicius Costa Gomes */
13259c66d156SVinicius Costa Gomes static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
13269c66d156SVinicius Costa Gomes struct netlink_ext_ack *extack)
13279c66d156SVinicius Costa Gomes {
13289c66d156SVinicius Costa Gomes struct taprio_sched *q = qdisc_priv(sch);
13299c66d156SVinicius Costa Gomes struct net_device *dev = qdisc_dev(sch);
13309c66d156SVinicius Costa Gomes int err = -EINVAL;
13319c66d156SVinicius Costa Gomes
13329c66d156SVinicius Costa Gomes if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
13339c66d156SVinicius Costa Gomes const struct ethtool_ops *ops = dev->ethtool_ops;
13349c66d156SVinicius Costa Gomes struct ethtool_ts_info info = {
13359c66d156SVinicius Costa Gomes .cmd = ETHTOOL_GET_TS_INFO,
13369c66d156SVinicius Costa Gomes .phc_index = -1,
13379c66d156SVinicius Costa Gomes };
13389c66d156SVinicius Costa Gomes
13399c66d156SVinicius Costa Gomes if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
13409c66d156SVinicius Costa Gomes NL_SET_ERR_MSG(extack,
13419c66d156SVinicius Costa Gomes "The 'clockid' cannot be specified for full offload");
13429c66d156SVinicius Costa Gomes goto out;
13439c66d156SVinicius Costa Gomes }
13449c66d156SVinicius Costa Gomes
13459c66d156SVinicius Costa Gomes if (ops && ops->get_ts_info)
13469c66d156SVinicius Costa Gomes err = ops->get_ts_info(dev, &info);
13479c66d156SVinicius Costa Gomes
13489c66d156SVinicius Costa Gomes if (err || info.phc_index < 0) {
13499c66d156SVinicius Costa Gomes NL_SET_ERR_MSG(extack,
13509c66d156SVinicius Costa Gomes "Device does not have a PTP clock");
13519c66d156SVinicius Costa Gomes err = -ENOTSUPP;
13529c66d156SVinicius Costa Gomes goto out;
13539c66d156SVinicius Costa Gomes }
13549c66d156SVinicius Costa Gomes } else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
13559c66d156SVinicius Costa Gomes int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
13566dc25401SEric Dumazet enum tk_offsets tk_offset;
13579c66d156SVinicius Costa Gomes
13589c66d156SVinicius Costa Gomes /* We only support static clockids and we don't allow
13599c66d156SVinicius Costa Gomes * it to be modified after the first init.
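 * E.g. a first change() that selected CLOCK_TAI followed by another
 * change() asking for CLOCK_REALTIME is rejected below, since
 * q->clockid is no longer -1 and the two values differ.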
13609c66d156SVinicius Costa Gomes */
13619c66d156SVinicius Costa Gomes if (clockid < 0 ||
13629c66d156SVinicius Costa Gomes (q->clockid != -1 && q->clockid != clockid)) {
13639c66d156SVinicius Costa Gomes NL_SET_ERR_MSG(extack,
13649c66d156SVinicius Costa Gomes "Changing the 'clockid' of a running schedule is not supported");
13659c66d156SVinicius Costa Gomes err = -ENOTSUPP;
13669c66d156SVinicius Costa Gomes goto out;
13679c66d156SVinicius Costa Gomes }
13689c66d156SVinicius Costa Gomes
13699c66d156SVinicius Costa Gomes switch (clockid) {
13709c66d156SVinicius Costa Gomes case CLOCK_REALTIME:
13716dc25401SEric Dumazet tk_offset = TK_OFFS_REAL;
13729c66d156SVinicius Costa Gomes break;
13739c66d156SVinicius Costa Gomes case CLOCK_MONOTONIC:
13746dc25401SEric Dumazet tk_offset = TK_OFFS_MAX;
13759c66d156SVinicius Costa Gomes break;
13769c66d156SVinicius Costa Gomes case CLOCK_BOOTTIME:
13776dc25401SEric Dumazet tk_offset = TK_OFFS_BOOT;
13789c66d156SVinicius Costa Gomes break;
13799c66d156SVinicius Costa Gomes case CLOCK_TAI:
13806dc25401SEric Dumazet tk_offset = TK_OFFS_TAI;
13819c66d156SVinicius Costa Gomes break;
13829c66d156SVinicius Costa Gomes default:
13839c66d156SVinicius Costa Gomes NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
13849c66d156SVinicius Costa Gomes err = -EINVAL;
13859c66d156SVinicius Costa Gomes goto out;
13869c66d156SVinicius Costa Gomes }
13876dc25401SEric Dumazet /* This pairs with READ_ONCE() in taprio_mono_to_any */
13886dc25401SEric Dumazet WRITE_ONCE(q->tk_offset, tk_offset);
13899c66d156SVinicius Costa Gomes
13909c66d156SVinicius Costa Gomes q->clockid = clockid;
13919c66d156SVinicius Costa Gomes } else {
13929c66d156SVinicius Costa Gomes NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
13939c66d156SVinicius Costa Gomes goto out;
13949c66d156SVinicius Costa Gomes }
1395a954380aSVinicius Costa Gomes
1396a954380aSVinicius Costa Gomes /* Everything went ok, return success. */
1397a954380aSVinicius Costa Gomes err = 0;
1398a954380aSVinicius Costa Gomes
13999c66d156SVinicius Costa Gomes out:
14009c66d156SVinicius Costa Gomes return err;
14019c66d156SVinicius Costa Gomes }
14029c66d156SVinicius Costa Gomes
1403a54fc09eSVladimir Oltean static int taprio_parse_tc_entry(struct Qdisc *sch,
1404a54fc09eSVladimir Oltean struct nlattr *opt,
1405a54fc09eSVladimir Oltean u32 max_sdu[TC_QOPT_MAX_QUEUE],
1406a54fc09eSVladimir Oltean unsigned long *seen_tcs,
1407a54fc09eSVladimir Oltean struct netlink_ext_ack *extack)
1408a54fc09eSVladimir Oltean {
1409a54fc09eSVladimir Oltean struct nlattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { };
1410a54fc09eSVladimir Oltean struct net_device *dev = qdisc_dev(sch);
1411a54fc09eSVladimir Oltean u32 val = 0;
1412a54fc09eSVladimir Oltean int err, tc;
1413a54fc09eSVladimir Oltean
1414a54fc09eSVladimir Oltean err = nla_parse_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, opt,
1415a54fc09eSVladimir Oltean taprio_tc_policy, extack);
1416a54fc09eSVladimir Oltean if (err < 0)
1417a54fc09eSVladimir Oltean return err;
1418a54fc09eSVladimir Oltean
1419a54fc09eSVladimir Oltean if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) {
1420a54fc09eSVladimir Oltean NL_SET_ERR_MSG_MOD(extack, "TC entry index missing");
1421a54fc09eSVladimir Oltean return -EINVAL;
1422a54fc09eSVladimir Oltean }
1423a54fc09eSVladimir Oltean
1424a54fc09eSVladimir Oltean tc = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]);
1425a54fc09eSVladimir Oltean if (tc >= TC_QOPT_MAX_QUEUE) {
1426a54fc09eSVladimir Oltean NL_SET_ERR_MSG_MOD(extack, "TC entry index out of range");
1427a54fc09eSVladimir Oltean return -ERANGE;
1428a54fc09eSVladimir Oltean }
1429a54fc09eSVladimir Oltean
1430a54fc09eSVladimir Oltean if (*seen_tcs & BIT(tc)) {
1431a54fc09eSVladimir Oltean NL_SET_ERR_MSG_MOD(extack, "Duplicate TC entry");
1432a54fc09eSVladimir Oltean return -EINVAL;
1433a54fc09eSVladimir Oltean }
1434a54fc09eSVladimir Oltean
1435a54fc09eSVladimir Oltean *seen_tcs |= BIT(tc);
1436a54fc09eSVladimir Oltean
1437a54fc09eSVladimir Oltean if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU])
1438a54fc09eSVladimir Oltean val = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]);
1439a54fc09eSVladimir Oltean
1440a54fc09eSVladimir Oltean if (val > dev->max_mtu) {
1441a54fc09eSVladimir Oltean NL_SET_ERR_MSG_MOD(extack, "TC max SDU exceeds device max MTU");
1442a54fc09eSVladimir Oltean return -ERANGE;
1443a54fc09eSVladimir Oltean }
1444a54fc09eSVladimir Oltean
1445a54fc09eSVladimir Oltean max_sdu[tc] = val;
1446a54fc09eSVladimir Oltean
1447a54fc09eSVladimir Oltean return 0;
1448a54fc09eSVladimir Oltean }
1449a54fc09eSVladimir Oltean
1450a54fc09eSVladimir Oltean static int taprio_parse_tc_entries(struct Qdisc *sch,
1451a54fc09eSVladimir Oltean struct nlattr *opt,
1452a54fc09eSVladimir Oltean struct netlink_ext_ack *extack)
1453a54fc09eSVladimir Oltean {
1454a54fc09eSVladimir Oltean struct taprio_sched *q = qdisc_priv(sch);
1455a54fc09eSVladimir Oltean struct net_device *dev = qdisc_dev(sch);
1456a54fc09eSVladimir Oltean u32 max_sdu[TC_QOPT_MAX_QUEUE];
1457a54fc09eSVladimir Oltean unsigned long seen_tcs = 0;
1458a54fc09eSVladimir Oltean struct nlattr *n;
1459a54fc09eSVladimir Oltean int tc, rem;
1460a54fc09eSVladimir Oltean int err = 0;
1461a54fc09eSVladimir Oltean
1462a54fc09eSVladimir Oltean for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
1463a54fc09eSVladimir Oltean max_sdu[tc] = q->max_sdu[tc];
1464a54fc09eSVladimir Oltean
1465a54fc09eSVladimir Oltean nla_for_each_nested(n, opt, rem) {
1466a54fc09eSVladimir Oltean if (nla_type(n) != TCA_TAPRIO_ATTR_TC_ENTRY)
1467a54fc09eSVladimir Oltean continue; 1468a54fc09eSVladimir Oltean 1469a54fc09eSVladimir Oltean err = taprio_parse_tc_entry(sch, n, max_sdu, &seen_tcs, extack); 1470a54fc09eSVladimir Oltean if (err) 1471a54fc09eSVladimir Oltean goto out; 1472a54fc09eSVladimir Oltean } 1473a54fc09eSVladimir Oltean 1474a54fc09eSVladimir Oltean for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) { 1475a54fc09eSVladimir Oltean q->max_sdu[tc] = max_sdu[tc]; 1476a54fc09eSVladimir Oltean if (max_sdu[tc]) 1477a54fc09eSVladimir Oltean q->max_frm_len[tc] = max_sdu[tc] + dev->hard_header_len; 1478a54fc09eSVladimir Oltean else 1479a54fc09eSVladimir Oltean q->max_frm_len[tc] = U32_MAX; /* never oversized */ 1480a54fc09eSVladimir Oltean } 1481a54fc09eSVladimir Oltean 1482a54fc09eSVladimir Oltean out: 1483a54fc09eSVladimir Oltean return err; 1484a54fc09eSVladimir Oltean } 1485a54fc09eSVladimir Oltean 1486b5a0faa3SIvan Khoronzhuk static int taprio_mqprio_cmp(const struct net_device *dev, 1487b5a0faa3SIvan Khoronzhuk const struct tc_mqprio_qopt *mqprio) 1488b5a0faa3SIvan Khoronzhuk { 1489b5a0faa3SIvan Khoronzhuk int i; 1490b5a0faa3SIvan Khoronzhuk 1491b5a0faa3SIvan Khoronzhuk if (!mqprio || mqprio->num_tc != dev->num_tc) 1492b5a0faa3SIvan Khoronzhuk return -1; 1493b5a0faa3SIvan Khoronzhuk 1494b5a0faa3SIvan Khoronzhuk for (i = 0; i < mqprio->num_tc; i++) 1495b5a0faa3SIvan Khoronzhuk if (dev->tc_to_txq[i].count != mqprio->count[i] || 1496b5a0faa3SIvan Khoronzhuk dev->tc_to_txq[i].offset != mqprio->offset[i]) 1497b5a0faa3SIvan Khoronzhuk return -1; 1498b5a0faa3SIvan Khoronzhuk 1499b5a0faa3SIvan Khoronzhuk for (i = 0; i <= TC_BITMASK; i++) 1500b5a0faa3SIvan Khoronzhuk if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i]) 1501b5a0faa3SIvan Khoronzhuk return -1; 1502b5a0faa3SIvan Khoronzhuk 1503b5a0faa3SIvan Khoronzhuk return 0; 1504b5a0faa3SIvan Khoronzhuk } 1505b5a0faa3SIvan Khoronzhuk 1506a9d62274SVinicius Costa Gomes /* The semantics of the 'flags' argument in relation to 'change()' 1507a9d62274SVinicius Costa Gomes * requests, are interpreted following two rules (which are applied in 1508a9d62274SVinicius Costa Gomes * this order): (1) an omitted 'flags' argument is interpreted as 1509a9d62274SVinicius Costa Gomes * zero; (2) the 'flags' of a "running" taprio instance cannot be 1510a9d62274SVinicius Costa Gomes * changed. 
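 * For example, an instance created with the txtime-assist flag that
 * later receives a change() request asking for full offload instead
 * is rejected with -EOPNOTSUPP by taprio_new_flags() below.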
1511a9d62274SVinicius Costa Gomes */ 1512a9d62274SVinicius Costa Gomes static int taprio_new_flags(const struct nlattr *attr, u32 old, 1513a9d62274SVinicius Costa Gomes struct netlink_ext_ack *extack) 1514a9d62274SVinicius Costa Gomes { 1515a9d62274SVinicius Costa Gomes u32 new = 0; 1516a9d62274SVinicius Costa Gomes 1517a9d62274SVinicius Costa Gomes if (attr) 1518a9d62274SVinicius Costa Gomes new = nla_get_u32(attr); 1519a9d62274SVinicius Costa Gomes 1520a9d62274SVinicius Costa Gomes if (old != TAPRIO_FLAGS_INVALID && old != new) { 1521a9d62274SVinicius Costa Gomes NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported"); 1522a9d62274SVinicius Costa Gomes return -EOPNOTSUPP; 1523a9d62274SVinicius Costa Gomes } 1524a9d62274SVinicius Costa Gomes 1525a9d62274SVinicius Costa Gomes if (!taprio_flags_valid(new)) { 1526a9d62274SVinicius Costa Gomes NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid"); 1527a9d62274SVinicius Costa Gomes return -EINVAL; 1528a9d62274SVinicius Costa Gomes } 1529a9d62274SVinicius Costa Gomes 1530a9d62274SVinicius Costa Gomes return new; 1531a9d62274SVinicius Costa Gomes } 1532a9d62274SVinicius Costa Gomes 15335a781ccbSVinicius Costa Gomes static int taprio_change(struct Qdisc *sch, struct nlattr *opt, 15345a781ccbSVinicius Costa Gomes struct netlink_ext_ack *extack) 15355a781ccbSVinicius Costa Gomes { 15365a781ccbSVinicius Costa Gomes struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { }; 1537a3d43c0dSVinicius Costa Gomes struct sched_gate_list *oper, *admin, *new_admin; 15385a781ccbSVinicius Costa Gomes struct taprio_sched *q = qdisc_priv(sch); 15395a781ccbSVinicius Costa Gomes struct net_device *dev = qdisc_dev(sch); 15405a781ccbSVinicius Costa Gomes struct tc_mqprio_qopt *mqprio = NULL; 1541a3d43c0dSVinicius Costa Gomes unsigned long flags; 15425a781ccbSVinicius Costa Gomes ktime_t start; 15439c66d156SVinicius Costa Gomes int i, err; 15445a781ccbSVinicius Costa Gomes 15458cb08174SJohannes Berg err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt, 15465a781ccbSVinicius Costa Gomes taprio_policy, extack); 15475a781ccbSVinicius Costa Gomes if (err < 0) 15485a781ccbSVinicius Costa Gomes return err; 15495a781ccbSVinicius Costa Gomes 15505a781ccbSVinicius Costa Gomes if (tb[TCA_TAPRIO_ATTR_PRIOMAP]) 15515a781ccbSVinicius Costa Gomes mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]); 15525a781ccbSVinicius Costa Gomes 1553a9d62274SVinicius Costa Gomes err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS], 1554a9d62274SVinicius Costa Gomes q->flags, extack); 1555a9d62274SVinicius Costa Gomes if (err < 0) 1556a9d62274SVinicius Costa Gomes return err; 15574cfd5779SVedang Patel 1558a9d62274SVinicius Costa Gomes q->flags = err; 15594cfd5779SVedang Patel 1560a9d62274SVinicius Costa Gomes err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags); 15615a781ccbSVinicius Costa Gomes if (err < 0) 15625a781ccbSVinicius Costa Gomes return err; 15635a781ccbSVinicius Costa Gomes 1564a54fc09eSVladimir Oltean err = taprio_parse_tc_entries(sch, opt, extack); 1565a54fc09eSVladimir Oltean if (err) 1566a54fc09eSVladimir Oltean return err; 1567a54fc09eSVladimir Oltean 1568a3d43c0dSVinicius Costa Gomes new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL); 1569a3d43c0dSVinicius Costa Gomes if (!new_admin) { 1570a3d43c0dSVinicius Costa Gomes NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule"); 1571a3d43c0dSVinicius Costa Gomes return -ENOMEM; 1572a3d43c0dSVinicius Costa Gomes } 1573a3d43c0dSVinicius Costa Gomes INIT_LIST_HEAD(&new_admin->entries); 
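	/* A sketch of the configuration this function parses, expressed as
	 * the equivalent iproute2 command (device name and times are
	 * illustrative, not taken from this file):
	 *
	 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
	 *         num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
	 *         queues 1@0 1@1 2@2 \
	 *         base-time 1528743495910289987 \
	 *         sched-entry S 01 300000 \
	 *         sched-entry S 02 300000 \
	 *         sched-entry S 04 400000 \
	 *         clockid CLOCK_TAI
	 *
	 * num_tc/map/queues arrive as TCA_TAPRIO_ATTR_PRIOMAP, the
	 * sched-entry list becomes 'new_admin' below, and the clockid is
	 * validated by taprio_parse_clockid().
	 */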
15745a781ccbSVinicius Costa Gomes 157518cdd2f0SVladimir Oltean oper = rtnl_dereference(q->oper_sched); 157618cdd2f0SVladimir Oltean admin = rtnl_dereference(q->admin_sched); 15775a781ccbSVinicius Costa Gomes 1578b5a0faa3SIvan Khoronzhuk /* no changes - no new mqprio settings */ 1579b5a0faa3SIvan Khoronzhuk if (!taprio_mqprio_cmp(dev, mqprio)) 1580b5a0faa3SIvan Khoronzhuk mqprio = NULL; 1581b5a0faa3SIvan Khoronzhuk 1582a3d43c0dSVinicius Costa Gomes if (mqprio && (oper || admin)) { 1583a3d43c0dSVinicius Costa Gomes NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported"); 1584a3d43c0dSVinicius Costa Gomes err = -ENOTSUPP; 1585a3d43c0dSVinicius Costa Gomes goto free_sched; 15865a781ccbSVinicius Costa Gomes } 15875a781ccbSVinicius Costa Gomes 1588b5b73b26SVinicius Costa Gomes err = parse_taprio_schedule(q, tb, new_admin, extack); 1589a3d43c0dSVinicius Costa Gomes if (err < 0) 1590a3d43c0dSVinicius Costa Gomes goto free_sched; 15915a781ccbSVinicius Costa Gomes 1592a3d43c0dSVinicius Costa Gomes if (new_admin->num_entries == 0) { 1593a3d43c0dSVinicius Costa Gomes NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule"); 1594a3d43c0dSVinicius Costa Gomes err = -EINVAL; 1595a3d43c0dSVinicius Costa Gomes goto free_sched; 1596a3d43c0dSVinicius Costa Gomes } 15975a781ccbSVinicius Costa Gomes 15989c66d156SVinicius Costa Gomes err = taprio_parse_clockid(sch, tb, extack); 15999c66d156SVinicius Costa Gomes if (err < 0) 1600a3d43c0dSVinicius Costa Gomes goto free_sched; 1601a3d43c0dSVinicius Costa Gomes 1602a3d43c0dSVinicius Costa Gomes taprio_set_picos_per_byte(dev, q); 1603a3d43c0dSVinicius Costa Gomes 16045652e63dSVinicius Costa Gomes if (mqprio) { 1605efe487fcSHaimin Zhang err = netdev_set_num_tc(dev, mqprio->num_tc); 1606efe487fcSHaimin Zhang if (err) 1607efe487fcSHaimin Zhang goto free_sched; 16085652e63dSVinicius Costa Gomes for (i = 0; i < mqprio->num_tc; i++) 16095652e63dSVinicius Costa Gomes netdev_set_tc_queue(dev, i, 16105652e63dSVinicius Costa Gomes mqprio->count[i], 16115652e63dSVinicius Costa Gomes mqprio->offset[i]); 16125652e63dSVinicius Costa Gomes 16135652e63dSVinicius Costa Gomes /* Always use supplied priority mappings */ 16145652e63dSVinicius Costa Gomes for (i = 0; i <= TC_BITMASK; i++) 16155652e63dSVinicius Costa Gomes netdev_set_prio_tc_map(dev, i, 16165652e63dSVinicius Costa Gomes mqprio->prio_tc_map[i]); 16175652e63dSVinicius Costa Gomes } 16185652e63dSVinicius Costa Gomes 1619a9d62274SVinicius Costa Gomes if (FULL_OFFLOAD_IS_ENABLED(q->flags)) 162009e31cf0SVinicius Costa Gomes err = taprio_enable_offload(dev, q, new_admin, extack); 16219c66d156SVinicius Costa Gomes else 16229c66d156SVinicius Costa Gomes err = taprio_disable_offload(dev, q, extack); 16239c66d156SVinicius Costa Gomes if (err) 16249c66d156SVinicius Costa Gomes goto free_sched; 16259c66d156SVinicius Costa Gomes 1626a3d43c0dSVinicius Costa Gomes /* Protects against enqueue()/dequeue() */ 1627a3d43c0dSVinicius Costa Gomes spin_lock_bh(qdisc_lock(sch)); 1628a3d43c0dSVinicius Costa Gomes 16294cfd5779SVedang Patel if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) { 16304cfd5779SVedang Patel if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) { 16314cfd5779SVedang Patel NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled"); 16324cfd5779SVedang Patel err = -EINVAL; 16334cfd5779SVedang Patel goto unlock; 16344cfd5779SVedang Patel } 16354cfd5779SVedang Patel 1636a5b64700SVedang Patel q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]); 
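		/* Illustrative numbers (the consuming code lives earlier in
		 * this file): txtime-delay 500000 gives txtime-assist mode
		 * roughly 500us of headroom between stamping a packet's
		 * launch time and that packet having to reach the NIC.
		 */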
16374cfd5779SVedang Patel }
16384cfd5779SVedang Patel
1639a9d62274SVinicius Costa Gomes if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
1640a9d62274SVinicius Costa Gomes !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
16414cfd5779SVedang Patel !hrtimer_active(&q->advance_timer)) {
1642a3d43c0dSVinicius Costa Gomes hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
1643a3d43c0dSVinicius Costa Gomes q->advance_timer.function = advance_sched;
16445a781ccbSVinicius Costa Gomes }
16455a781ccbSVinicius Costa Gomes
1646a3d43c0dSVinicius Costa Gomes err = taprio_get_start_time(sch, new_admin, &start);
1647a3d43c0dSVinicius Costa Gomes if (err < 0) {
1648a3d43c0dSVinicius Costa Gomes NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
1649a3d43c0dSVinicius Costa Gomes goto unlock;
1650a3d43c0dSVinicius Costa Gomes }
16515a781ccbSVinicius Costa Gomes
16524cfd5779SVedang Patel setup_txtime(q, new_admin, start);
16534cfd5779SVedang Patel
1654bfabd41dSVinicius Costa Gomes if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
16554cfd5779SVedang Patel if (!oper) {
16564cfd5779SVedang Patel rcu_assign_pointer(q->oper_sched, new_admin);
16574cfd5779SVedang Patel err = 0;
16584cfd5779SVedang Patel new_admin = NULL;
16594cfd5779SVedang Patel goto unlock;
16604cfd5779SVedang Patel }
16614cfd5779SVedang Patel
16624cfd5779SVedang Patel rcu_assign_pointer(q->admin_sched, new_admin);
16634cfd5779SVedang Patel if (admin)
16644cfd5779SVedang Patel call_rcu(&admin->rcu, taprio_free_sched_cb);
16654cfd5779SVedang Patel } else {
1666a3d43c0dSVinicius Costa Gomes setup_first_close_time(q, new_admin, start);
1667a3d43c0dSVinicius Costa Gomes
1668a3d43c0dSVinicius Costa Gomes /* Protects against advance_sched() */
1669a3d43c0dSVinicius Costa Gomes spin_lock_irqsave(&q->current_entry_lock, flags);
1670a3d43c0dSVinicius Costa Gomes
1671a3d43c0dSVinicius Costa Gomes taprio_start_sched(sch, start, new_admin);
1672a3d43c0dSVinicius Costa Gomes
1673a3d43c0dSVinicius Costa Gomes rcu_assign_pointer(q->admin_sched, new_admin);
1674a3d43c0dSVinicius Costa Gomes if (admin)
1675a3d43c0dSVinicius Costa Gomes call_rcu(&admin->rcu, taprio_free_sched_cb);
1676a3d43c0dSVinicius Costa Gomes
1677a3d43c0dSVinicius Costa Gomes spin_unlock_irqrestore(&q->current_entry_lock, flags);
16780763b3e8SIvan Khoronzhuk
1679a9d62274SVinicius Costa Gomes if (FULL_OFFLOAD_IS_ENABLED(q->flags))
16800763b3e8SIvan Khoronzhuk taprio_offload_config_changed(q);
16814cfd5779SVedang Patel }
1682a3d43c0dSVinicius Costa Gomes
16834cfd5779SVedang Patel new_admin = NULL;
1684a3d43c0dSVinicius Costa Gomes err = 0;
1685a3d43c0dSVinicius Costa Gomes
1686a3d43c0dSVinicius Costa Gomes unlock:
1687a3d43c0dSVinicius Costa Gomes spin_unlock_bh(qdisc_lock(sch));
1688a3d43c0dSVinicius Costa Gomes
1689a3d43c0dSVinicius Costa Gomes free_sched:
169051650d33SIvan Khoronzhuk if (new_admin)
169151650d33SIvan Khoronzhuk call_rcu(&new_admin->rcu, taprio_free_sched_cb);
1692a3d43c0dSVinicius Costa Gomes
1693a3d43c0dSVinicius Costa Gomes return err;
16945a781ccbSVinicius Costa Gomes }
16955a781ccbSVinicius Costa Gomes
169644d4775cSDavide Caratti static void taprio_reset(struct Qdisc *sch)
169744d4775cSDavide Caratti {
169844d4775cSDavide Caratti struct taprio_sched *q = qdisc_priv(sch);
169944d4775cSDavide Caratti struct net_device *dev = qdisc_dev(sch);
170044d4775cSDavide Caratti int i;
170144d4775cSDavide Caratti
170244d4775cSDavide Caratti hrtimer_cancel(&q->advance_timer);
1703*3a415d59SEric Dumazet qdisc_synchronize(sch);
1704*3a415d59SEric Dumazet
170544d4775cSDavide Caratti if (q->qdiscs) {
1706698285daSDavide Caratti for (i = 0; i < dev->num_tx_queues; i++)
1707698285daSDavide Caratti if (q->qdiscs[i])
170844d4775cSDavide Caratti qdisc_reset(q->qdiscs[i]);
170944d4775cSDavide Caratti }
171044d4775cSDavide Caratti }
171144d4775cSDavide Caratti
17125a781ccbSVinicius Costa Gomes static void taprio_destroy(struct Qdisc *sch)
17135a781ccbSVinicius Costa Gomes {
17145a781ccbSVinicius Costa Gomes struct taprio_sched *q = qdisc_priv(sch);
17155a781ccbSVinicius Costa Gomes struct net_device *dev = qdisc_dev(sch);
17169af23657SVladimir Oltean struct sched_gate_list *oper, *admin;
17175a781ccbSVinicius Costa Gomes unsigned int i;
17185a781ccbSVinicius Costa Gomes
17197b9eba7bSLeandro Dorileo list_del(&q->taprio_list);
17207b9eba7bSLeandro Dorileo
1721a56d447fSEric Dumazet /* Note that taprio_reset() might not be called if an error
1722a56d447fSEric Dumazet * happens in qdisc_create(), after taprio_init() has been called.
1723a56d447fSEric Dumazet */
1724a56d447fSEric Dumazet hrtimer_cancel(&q->advance_timer);
1725*3a415d59SEric Dumazet qdisc_synchronize(sch);
17265a781ccbSVinicius Costa Gomes
17279c66d156SVinicius Costa Gomes taprio_disable_offload(dev, q, NULL);
17289c66d156SVinicius Costa Gomes
17295a781ccbSVinicius Costa Gomes if (q->qdiscs) {
1730698285daSDavide Caratti for (i = 0; i < dev->num_tx_queues; i++)
17315a781ccbSVinicius Costa Gomes qdisc_put(q->qdiscs[i]);
17325a781ccbSVinicius Costa Gomes
17335a781ccbSVinicius Costa Gomes kfree(q->qdiscs);
17345a781ccbSVinicius Costa Gomes }
17355a781ccbSVinicius Costa Gomes q->qdiscs = NULL;
17365a781ccbSVinicius Costa Gomes
17377c16680aSVinicius Costa Gomes netdev_reset_tc(dev);
17385a781ccbSVinicius Costa Gomes
17399af23657SVladimir Oltean oper = rtnl_dereference(q->oper_sched);
17409af23657SVladimir Oltean admin = rtnl_dereference(q->admin_sched);
1741a3d43c0dSVinicius Costa Gomes
17429af23657SVladimir Oltean if (oper)
17439af23657SVladimir Oltean call_rcu(&oper->rcu, taprio_free_sched_cb);
17449af23657SVladimir Oltean
17459af23657SVladimir Oltean if (admin)
17469af23657SVladimir Oltean call_rcu(&admin->rcu, taprio_free_sched_cb);
17475a781ccbSVinicius Costa Gomes }
17485a781ccbSVinicius Costa Gomes
17495a781ccbSVinicius Costa Gomes static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
17505a781ccbSVinicius Costa Gomes struct netlink_ext_ack *extack)
17515a781ccbSVinicius Costa Gomes {
17525a781ccbSVinicius Costa Gomes struct taprio_sched *q = qdisc_priv(sch);
17535a781ccbSVinicius Costa Gomes struct net_device *dev = qdisc_dev(sch);
1754a3d43c0dSVinicius Costa Gomes int i;
17555a781ccbSVinicius Costa Gomes
17565a781ccbSVinicius Costa Gomes spin_lock_init(&q->current_entry_lock);
17575a781ccbSVinicius Costa Gomes
17585a781ccbSVinicius Costa Gomes hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
1759a3d43c0dSVinicius Costa Gomes q->advance_timer.function = advance_sched;
17605a781ccbSVinicius Costa Gomes
17615a781ccbSVinicius Costa Gomes q->root = sch;
17625a781ccbSVinicius Costa Gomes
17635a781ccbSVinicius Costa Gomes /* We only support static clockids. Use an invalid value as default
17645a781ccbSVinicius Costa Gomes * and get the valid one on taprio_change().
17655a781ccbSVinicius Costa Gomes */
17665a781ccbSVinicius Costa Gomes q->clockid = -1;
1767a9d62274SVinicius Costa Gomes q->flags = TAPRIO_FLAGS_INVALID;
17685a781ccbSVinicius Costa Gomes
1769efb55222SVladimir Oltean list_add(&q->taprio_list, &taprio_list);
1770efb55222SVladimir Oltean
1771026de64dSVladimir Oltean if (sch->parent != TC_H_ROOT) {
1772026de64dSVladimir Oltean NL_SET_ERR_MSG_MOD(extack, "Can only be attached as root qdisc");
17735a781ccbSVinicius Costa Gomes return -EOPNOTSUPP;
1774026de64dSVladimir Oltean }
17755a781ccbSVinicius Costa Gomes
1776026de64dSVladimir Oltean if (!netif_is_multiqueue(dev)) {
1777026de64dSVladimir Oltean NL_SET_ERR_MSG_MOD(extack, "Multi-queue device is required");
17785a781ccbSVinicius Costa Gomes return -EOPNOTSUPP;
1779026de64dSVladimir Oltean }
17805a781ccbSVinicius Costa Gomes
17815a781ccbSVinicius Costa Gomes /* pre-allocate qdisc, attachment can't fail */
17825a781ccbSVinicius Costa Gomes q->qdiscs = kcalloc(dev->num_tx_queues,
17835a781ccbSVinicius Costa Gomes sizeof(q->qdiscs[0]),
17845a781ccbSVinicius Costa Gomes GFP_KERNEL);
17855a781ccbSVinicius Costa Gomes
17865a781ccbSVinicius Costa Gomes if (!q->qdiscs)
17875a781ccbSVinicius Costa Gomes return -ENOMEM;
17885a781ccbSVinicius Costa Gomes
17895a781ccbSVinicius Costa Gomes if (!opt)
17905a781ccbSVinicius Costa Gomes return -EINVAL;
17915a781ccbSVinicius Costa Gomes
1792a3d43c0dSVinicius Costa Gomes for (i = 0; i < dev->num_tx_queues; i++) {
1793a3d43c0dSVinicius Costa Gomes struct netdev_queue *dev_queue;
1794a3d43c0dSVinicius Costa Gomes struct Qdisc *qdisc;
1795a3d43c0dSVinicius Costa Gomes
1796a3d43c0dSVinicius Costa Gomes dev_queue = netdev_get_tx_queue(dev, i);
1797a3d43c0dSVinicius Costa Gomes qdisc = qdisc_create_dflt(dev_queue,
1798a3d43c0dSVinicius Costa Gomes &pfifo_qdisc_ops,
1799a3d43c0dSVinicius Costa Gomes TC_H_MAKE(TC_H_MAJ(sch->handle),
1800a3d43c0dSVinicius Costa Gomes TC_H_MIN(i + 1)),
1801a3d43c0dSVinicius Costa Gomes extack);
1802a3d43c0dSVinicius Costa Gomes if (!qdisc)
1803a3d43c0dSVinicius Costa Gomes return -ENOMEM;
1804a3d43c0dSVinicius Costa Gomes
1805a3d43c0dSVinicius Costa Gomes if (i < dev->real_num_tx_queues)
1806a3d43c0dSVinicius Costa Gomes qdisc_hash_add(qdisc, false);
1807a3d43c0dSVinicius Costa Gomes
1808a3d43c0dSVinicius Costa Gomes q->qdiscs[i] = qdisc;
1809a3d43c0dSVinicius Costa Gomes }
1810a3d43c0dSVinicius Costa Gomes
18115a781ccbSVinicius Costa Gomes return taprio_change(sch, opt, extack);
18125a781ccbSVinicius Costa Gomes }
18135a781ccbSVinicius Costa Gomes
181413511704SYannick Vignon static void taprio_attach(struct Qdisc *sch)
181513511704SYannick Vignon {
181613511704SYannick Vignon struct taprio_sched *q = qdisc_priv(sch);
181713511704SYannick Vignon struct net_device *dev = qdisc_dev(sch);
181813511704SYannick Vignon unsigned int ntx;
181913511704SYannick Vignon
182013511704SYannick Vignon /* Attach underlying qdisc */
182113511704SYannick Vignon for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
182213511704SYannick Vignon struct Qdisc *qdisc = q->qdiscs[ntx];
182313511704SYannick Vignon struct Qdisc *old;
182413511704SYannick Vignon
182513511704SYannick Vignon if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
182613511704SYannick Vignon qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
182713511704SYannick Vignon old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
182813511704SYannick Vignon } else {
182913511704SYannick Vignon old = dev_graft_qdisc(qdisc->dev_queue, sch);
183013511704SYannick Vignon qdisc_refcount_inc(sch);
183113511704SYannick Vignon }
183213511704SYannick Vignon if (old)
183313511704SYannick Vignon qdisc_put(old);
183413511704SYannick Vignon }
183513511704SYannick Vignon
183613511704SYannick Vignon /* access to the child qdiscs is not needed in offload mode */
183713511704SYannick Vignon if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
183813511704SYannick Vignon kfree(q->qdiscs);
183913511704SYannick Vignon q->qdiscs = NULL;
184013511704SYannick Vignon }
184113511704SYannick Vignon }
184213511704SYannick Vignon
18435a781ccbSVinicius Costa Gomes static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
18445a781ccbSVinicius Costa Gomes unsigned long cl)
18455a781ccbSVinicius Costa Gomes {
18465a781ccbSVinicius Costa Gomes struct net_device *dev = qdisc_dev(sch);
18475a781ccbSVinicius Costa Gomes unsigned long ntx = cl - 1;
18485a781ccbSVinicius Costa Gomes
18495a781ccbSVinicius Costa Gomes if (ntx >= dev->num_tx_queues)
18505a781ccbSVinicius Costa Gomes return NULL;
18515a781ccbSVinicius Costa Gomes
18525a781ccbSVinicius Costa Gomes return netdev_get_tx_queue(dev, ntx);
18535a781ccbSVinicius Costa Gomes }
18545a781ccbSVinicius Costa Gomes
18555a781ccbSVinicius Costa Gomes static int taprio_graft(struct Qdisc *sch, unsigned long cl,
18565a781ccbSVinicius Costa Gomes struct Qdisc *new, struct Qdisc **old,
18575a781ccbSVinicius Costa Gomes struct netlink_ext_ack *extack)
18585a781ccbSVinicius Costa Gomes {
18595a781ccbSVinicius Costa Gomes struct taprio_sched *q = qdisc_priv(sch);
18605a781ccbSVinicius Costa Gomes struct net_device *dev = qdisc_dev(sch);
18615a781ccbSVinicius Costa Gomes struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
18625a781ccbSVinicius Costa Gomes
18635a781ccbSVinicius Costa Gomes if (!dev_queue)
18645a781ccbSVinicius Costa Gomes return -EINVAL;
18655a781ccbSVinicius Costa Gomes
18665a781ccbSVinicius Costa Gomes if (dev->flags & IFF_UP)
18675a781ccbSVinicius Costa Gomes dev_deactivate(dev);
18685a781ccbSVinicius Costa Gomes
186913511704SYannick Vignon if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
187013511704SYannick Vignon *old = dev_graft_qdisc(dev_queue, new);
187113511704SYannick Vignon } else {
18725a781ccbSVinicius Costa Gomes *old = q->qdiscs[cl - 1];
18735a781ccbSVinicius Costa Gomes q->qdiscs[cl - 1] = new;
187413511704SYannick Vignon }
18755a781ccbSVinicius Costa Gomes
18765a781ccbSVinicius Costa Gomes if (new)
18775a781ccbSVinicius Costa Gomes new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
18785a781ccbSVinicius Costa Gomes
18795a781ccbSVinicius Costa Gomes if (dev->flags & IFF_UP)
18805a781ccbSVinicius Costa Gomes dev_activate(dev);
18815a781ccbSVinicius Costa Gomes
18825a781ccbSVinicius Costa Gomes return 0;
18835a781ccbSVinicius Costa Gomes }
18845a781ccbSVinicius Costa Gomes
18855a781ccbSVinicius Costa Gomes static int dump_entry(struct sk_buff *msg,
18865a781ccbSVinicius Costa Gomes const struct sched_entry *entry)
18875a781ccbSVinicius Costa Gomes {
18885a781ccbSVinicius Costa Gomes struct nlattr *item;
18895a781ccbSVinicius Costa Gomes
1890ae0be8deSMichal Kubecek item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
18915a781ccbSVinicius Costa Gomes if (!item)
18925a781ccbSVinicius Costa Gomes return -ENOSPC;
18935a781ccbSVinicius Costa Gomes
18945a781ccbSVinicius Costa Gomes if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
18955a781ccbSVinicius Costa Gomes goto nla_put_failure;
18965a781ccbSVinicius Costa Gomes
18975a781ccbSVinicius Costa Gomes if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
18985a781ccbSVinicius Costa Gomes goto nla_put_failure;
18995a781ccbSVinicius Costa Gomes
19005a781ccbSVinicius Costa Gomes if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
19015a781ccbSVinicius Costa Gomes entry->gate_mask))
19025a781ccbSVinicius Costa Gomes goto nla_put_failure;
19035a781ccbSVinicius Costa Gomes
19045a781ccbSVinicius Costa Gomes if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
19055a781ccbSVinicius Costa Gomes entry->interval))
19065a781ccbSVinicius Costa Gomes goto nla_put_failure;
19075a781ccbSVinicius Costa Gomes
19085a781ccbSVinicius Costa Gomes return nla_nest_end(msg, item);
19095a781ccbSVinicius Costa Gomes
19105a781ccbSVinicius Costa Gomes nla_put_failure:
19115a781ccbSVinicius Costa Gomes nla_nest_cancel(msg, item);
19125a781ccbSVinicius Costa Gomes return -1;
19135a781ccbSVinicius Costa Gomes }
19145a781ccbSVinicius Costa Gomes
1915a3d43c0dSVinicius Costa Gomes static int dump_schedule(struct sk_buff *msg,
1916a3d43c0dSVinicius Costa Gomes const struct sched_gate_list *root)
1917a3d43c0dSVinicius Costa Gomes {
1918a3d43c0dSVinicius Costa Gomes struct nlattr *entry_list;
1919a3d43c0dSVinicius Costa Gomes struct sched_entry *entry;
1920a3d43c0dSVinicius Costa Gomes
1921a3d43c0dSVinicius Costa Gomes if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
1922a3d43c0dSVinicius Costa Gomes root->base_time, TCA_TAPRIO_PAD))
1923a3d43c0dSVinicius Costa Gomes return -1;
1924a3d43c0dSVinicius Costa Gomes
19256ca6a665SVinicius Costa Gomes if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
19266ca6a665SVinicius Costa Gomes root->cycle_time, TCA_TAPRIO_PAD))
19276ca6a665SVinicius Costa Gomes return -1;
19286ca6a665SVinicius Costa Gomes
1929c25031e9SVinicius Costa Gomes if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
1930c25031e9SVinicius Costa Gomes root->cycle_time_extension, TCA_TAPRIO_PAD))
1931c25031e9SVinicius Costa Gomes return -1;
1932c25031e9SVinicius Costa Gomes
1933a3d43c0dSVinicius Costa Gomes entry_list = nla_nest_start_noflag(msg,
1934a3d43c0dSVinicius Costa Gomes TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
1935a3d43c0dSVinicius Costa Gomes if (!entry_list)
1936a3d43c0dSVinicius Costa Gomes goto error_nest;
1937a3d43c0dSVinicius Costa Gomes
1938a3d43c0dSVinicius Costa Gomes list_for_each_entry(entry, &root->entries, list) {
1939a3d43c0dSVinicius Costa Gomes if (dump_entry(msg, entry) < 0)
1940a3d43c0dSVinicius Costa Gomes goto error_nest;
1941a3d43c0dSVinicius Costa Gomes }
1942a3d43c0dSVinicius Costa Gomes
1943a3d43c0dSVinicius Costa Gomes nla_nest_end(msg, entry_list);
1944a3d43c0dSVinicius Costa Gomes return 0;
1945a3d43c0dSVinicius Costa Gomes
1946a3d43c0dSVinicius Costa Gomes error_nest:
1947a3d43c0dSVinicius Costa Gomes nla_nest_cancel(msg, entry_list);
1948a3d43c0dSVinicius Costa Gomes return -1;
1949a3d43c0dSVinicius Costa Gomes }
1950a3d43c0dSVinicius Costa Gomes
1951a54fc09eSVladimir Oltean static int taprio_dump_tc_entries(struct taprio_sched *q, struct sk_buff *skb)
1952a54fc09eSVladimir Oltean {
1953a54fc09eSVladimir Oltean struct nlattr *n;
1954a54fc09eSVladimir Oltean int tc;
1955a54fc09eSVladimir Oltean
1956a54fc09eSVladimir Oltean for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
1957a54fc09eSVladimir Oltean n = nla_nest_start(skb, TCA_TAPRIO_ATTR_TC_ENTRY);
1958a54fc09eSVladimir Oltean if (!n)
1959a54fc09eSVladimir Oltean return -EMSGSIZE;
1960a54fc09eSVladimir Oltean
1961a54fc09eSVladimir Oltean if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_INDEX, tc))
1962a54fc09eSVladimir Oltean goto nla_put_failure;
1963a54fc09eSVladimir Oltean
1964a54fc09eSVladimir Oltean if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
1965a54fc09eSVladimir Oltean q->max_sdu[tc]))
1966a54fc09eSVladimir Oltean goto nla_put_failure;
1967a54fc09eSVladimir Oltean
1968a54fc09eSVladimir Oltean nla_nest_end(skb, n);
1969a54fc09eSVladimir Oltean }
1970a54fc09eSVladimir Oltean
1971a54fc09eSVladimir Oltean return 0;
1972a54fc09eSVladimir Oltean
1973a54fc09eSVladimir Oltean nla_put_failure:
1974a54fc09eSVladimir Oltean nla_nest_cancel(skb, n);
1975a54fc09eSVladimir Oltean return -EMSGSIZE;
1976a54fc09eSVladimir Oltean }
1977a54fc09eSVladimir Oltean
19785a781ccbSVinicius Costa Gomes static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
19795a781ccbSVinicius Costa Gomes {
19805a781ccbSVinicius Costa Gomes struct taprio_sched *q = qdisc_priv(sch);
19815a781ccbSVinicius Costa Gomes struct net_device *dev = qdisc_dev(sch);
1982a3d43c0dSVinicius Costa Gomes struct sched_gate_list *oper, *admin;
19835a781ccbSVinicius Costa Gomes struct tc_mqprio_qopt opt = { 0 };
1984a3d43c0dSVinicius Costa Gomes struct nlattr *nest, *sched_nest;
19855a781ccbSVinicius Costa Gomes unsigned int i;
19865a781ccbSVinicius Costa Gomes
198718cdd2f0SVladimir Oltean oper = rtnl_dereference(q->oper_sched);
198818cdd2f0SVladimir Oltean admin = rtnl_dereference(q->admin_sched);
1989a3d43c0dSVinicius Costa Gomes
19905a781ccbSVinicius Costa Gomes opt.num_tc = netdev_get_num_tc(dev);
19915a781ccbSVinicius Costa Gomes memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
19925a781ccbSVinicius Costa Gomes
19935a781ccbSVinicius Costa Gomes for (i = 0; i < netdev_get_num_tc(dev); i++) {
19945a781ccbSVinicius Costa Gomes opt.count[i] = dev->tc_to_txq[i].count;
19955a781ccbSVinicius Costa Gomes opt.offset[i] = dev->tc_to_txq[i].offset;
19965a781ccbSVinicius Costa Gomes }
19975a781ccbSVinicius Costa Gomes
1998ae0be8deSMichal Kubecek nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
19995a781ccbSVinicius Costa Gomes if (!nest)
2000a3d43c0dSVinicius Costa Gomes goto start_error;
20015a781ccbSVinicius Costa Gomes
20025a781ccbSVinicius Costa Gomes if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
20035a781ccbSVinicius Costa Gomes goto options_error;
20045a781ccbSVinicius Costa Gomes
20059c66d156SVinicius Costa Gomes if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
20069c66d156SVinicius Costa Gomes nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
20075a781ccbSVinicius Costa Gomes goto options_error;
20085a781ccbSVinicius Costa Gomes
20094cfd5779SVedang Patel if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
20104cfd5779SVedang Patel goto options_error;
20114cfd5779SVedang Patel
20124cfd5779SVedang Patel if (q->txtime_delay &&
2013a5b64700SVedang Patel nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
20144cfd5779SVedang Patel goto options_error;
20154cfd5779SVedang Patel
2016a54fc09eSVladimir Oltean if (taprio_dump_tc_entries(q, skb))
2017a54fc09eSVladimir Oltean goto options_error;
2018a54fc09eSVladimir Oltean
2019a3d43c0dSVinicius Costa Gomes if (oper && dump_schedule(skb, oper))
20205a781ccbSVinicius Costa Gomes goto options_error;
20215a781ccbSVinicius Costa Gomes
2022a3d43c0dSVinicius Costa Gomes if (!admin)
2023a3d43c0dSVinicius Costa Gomes goto done;
20245a781ccbSVinicius Costa Gomes
2025a3d43c0dSVinicius Costa Gomes sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
2026e4acf427SColin Ian King if (!sched_nest)
2027e4acf427SColin Ian King goto options_error;
2028a3d43c0dSVinicius Costa Gomes
2029a3d43c0dSVinicius Costa Gomes if (dump_schedule(skb, admin))
2030a3d43c0dSVinicius Costa Gomes goto admin_error;
2031a3d43c0dSVinicius Costa Gomes
2032a3d43c0dSVinicius Costa Gomes nla_nest_end(skb, sched_nest);
2033a3d43c0dSVinicius Costa Gomes
2034a3d43c0dSVinicius Costa Gomes done:
20355a781ccbSVinicius Costa Gomes return nla_nest_end(skb, nest);
20365a781ccbSVinicius Costa Gomes
2037a3d43c0dSVinicius Costa Gomes admin_error:
2038a3d43c0dSVinicius Costa Gomes nla_nest_cancel(skb, sched_nest);
2039a3d43c0dSVinicius Costa Gomes
20405a781ccbSVinicius Costa Gomes options_error:
20415a781ccbSVinicius Costa Gomes nla_nest_cancel(skb, nest);
2042a3d43c0dSVinicius Costa Gomes
2043a3d43c0dSVinicius Costa Gomes start_error:
2044a3d43c0dSVinicius Costa Gomes return -ENOSPC;
20455a781ccbSVinicius Costa Gomes }
20465a781ccbSVinicius Costa Gomes
20475a781ccbSVinicius Costa Gomes static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
20485a781ccbSVinicius Costa Gomes {
2049af7b29b1SVladimir Oltean struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
20505a781ccbSVinicius Costa Gomes
2051af7b29b1SVladimir Oltean if (!dev_queue)
20525a781ccbSVinicius Costa Gomes return NULL;
20535a781ccbSVinicius Costa Gomes
2054af7b29b1SVladimir Oltean return dev_queue->qdisc_sleeping;
20555a781ccbSVinicius Costa Gomes }
20565a781ccbSVinicius Costa Gomes
20575a781ccbSVinicius Costa Gomes static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
20585a781ccbSVinicius Costa Gomes {
20595a781ccbSVinicius Costa Gomes unsigned int ntx = TC_H_MIN(classid);
20605a781ccbSVinicius Costa Gomes
20615a781ccbSVinicius Costa Gomes if (!taprio_queue_get(sch, ntx))
20625a781ccbSVinicius Costa Gomes return 0;
20635a781ccbSVinicius Costa Gomes return ntx;
20645a781ccbSVinicius Costa Gomes }
20655a781ccbSVinicius Costa Gomes
20665a781ccbSVinicius Costa Gomes static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
20675a781ccbSVinicius Costa Gomes struct sk_buff *skb, struct tcmsg *tcm)
20685a781ccbSVinicius Costa Gomes {
20695a781ccbSVinicius Costa Gomes struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
20705a781ccbSVinicius Costa Gomes
20715a781ccbSVinicius Costa Gomes tcm->tcm_parent = TC_H_ROOT;
20725a781ccbSVinicius Costa Gomes tcm->tcm_handle |= TC_H_MIN(cl);
20735a781ccbSVinicius Costa Gomes tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
20745a781ccbSVinicius Costa Gomes
20755a781ccbSVinicius Costa Gomes return 0;
20765a781ccbSVinicius Costa Gomes }
20775a781ccbSVinicius Costa Gomes
20785a781ccbSVinicius Costa Gomes static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
20795a781ccbSVinicius Costa Gomes struct gnet_dump *d)
20805a781ccbSVinicius Costa Gomes __releases(d->lock)
20815a781ccbSVinicius Costa Gomes __acquires(d->lock)
20825a781ccbSVinicius Costa Gomes {
20835a781ccbSVinicius Costa Gomes struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
20845a781ccbSVinicius Costa Gomes
20855a781ccbSVinicius Costa Gomes sch = dev_queue->qdisc_sleeping;
208629cbcd85SAhmed S. Darwish if (gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 ||
20875dd431b6SPaolo Abeni qdisc_qstats_copy(d, sch) < 0)
20885a781ccbSVinicius Costa Gomes return -1;
20895a781ccbSVinicius Costa Gomes return 0;
20905a781ccbSVinicius Costa Gomes }
20915a781ccbSVinicius Costa Gomes
20925a781ccbSVinicius Costa Gomes static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
20935a781ccbSVinicius Costa Gomes {
20945a781ccbSVinicius Costa Gomes struct net_device *dev = qdisc_dev(sch);
20955a781ccbSVinicius Costa Gomes unsigned long ntx;
20965a781ccbSVinicius Costa Gomes
20975a781ccbSVinicius Costa Gomes if (arg->stop)
20985a781ccbSVinicius Costa Gomes return;
20995a781ccbSVinicius Costa Gomes
21005a781ccbSVinicius Costa Gomes arg->count = arg->skip;
21015a781ccbSVinicius Costa Gomes for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
2102e046fa89SZhengchao Shao if (!tc_qdisc_stats_dump(sch, ntx + 1, arg))
21035a781ccbSVinicius Costa Gomes break;
21045a781ccbSVinicius Costa Gomes }
21055a781ccbSVinicius Costa Gomes }
21065a781ccbSVinicius Costa Gomes
21075a781ccbSVinicius Costa Gomes static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
21085a781ccbSVinicius Costa Gomes struct tcmsg *tcm)
21095a781ccbSVinicius Costa Gomes {
21105a781ccbSVinicius Costa Gomes return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
21115a781ccbSVinicius Costa Gomes }
21125a781ccbSVinicius Costa Gomes
21135a781ccbSVinicius Costa Gomes static const struct Qdisc_class_ops taprio_class_ops = {
21145a781ccbSVinicius Costa Gomes .graft = taprio_graft,
21155a781ccbSVinicius Costa Gomes .leaf = taprio_leaf,
21165a781ccbSVinicius Costa Gomes .find = taprio_find,
21175a781ccbSVinicius Costa Gomes .walk = taprio_walk,
21185a781ccbSVinicius Costa Gomes .dump = taprio_dump_class,
21195a781ccbSVinicius Costa Gomes .dump_stats = taprio_dump_class_stats,
21205a781ccbSVinicius Costa Gomes .select_queue = taprio_select_queue,
21215a781ccbSVinicius Costa Gomes };
21225a781ccbSVinicius Costa Gomes
21235a781ccbSVinicius Costa Gomes static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
21245a781ccbSVinicius Costa Gomes .cl_ops = &taprio_class_ops,
21255a781ccbSVinicius Costa Gomes .id = "taprio",
21265a781ccbSVinicius Costa Gomes .priv_size = sizeof(struct taprio_sched),
21275a781ccbSVinicius Costa Gomes .init = taprio_init,
2128a3d43c0dSVinicius Costa Gomes .change = taprio_change,
21295a781ccbSVinicius Costa Gomes .destroy = taprio_destroy,
213044d4775cSDavide Caratti .reset = taprio_reset,
213113511704SYannick Vignon .attach = taprio_attach,
21325a781ccbSVinicius Costa Gomes .peek = taprio_peek,
21335a781ccbSVinicius Costa Gomes .dequeue = taprio_dequeue,
21345a781ccbSVinicius Costa Gomes .enqueue = taprio_enqueue,
21355a781ccbSVinicius Costa Gomes .dump = taprio_dump,
21365a781ccbSVinicius Costa Gomes .owner = THIS_MODULE,
21375a781ccbSVinicius Costa Gomes };
21385a781ccbSVinicius Costa Gomes
21397b9eba7bSLeandro Dorileo static struct notifier_block taprio_device_notifier = {
21407b9eba7bSLeandro Dorileo .notifier_call = taprio_dev_notifier,
21417b9eba7bSLeandro Dorileo };
21427b9eba7bSLeandro Dorileo
21435a781ccbSVinicius Costa Gomes static int __init taprio_module_init(void)
21445a781ccbSVinicius Costa Gomes {
21457b9eba7bSLeandro Dorileo int err = register_netdevice_notifier(&taprio_device_notifier);
21467b9eba7bSLeandro Dorileo
21477b9eba7bSLeandro Dorileo if (err)
21487b9eba7bSLeandro Dorileo return err;
21497b9eba7bSLeandro Dorileo
21505a781ccbSVinicius Costa Gomes return register_qdisc(&taprio_qdisc_ops);
21515a781ccbSVinicius Costa Gomes }
21525a781ccbSVinicius Costa Gomes
21535a781ccbSVinicius Costa Gomes static void __exit taprio_module_exit(void)
21545a781ccbSVinicius Costa Gomes {
21555a781ccbSVinicius Costa Gomes unregister_qdisc(&taprio_qdisc_ops);
21567b9eba7bSLeandro Dorileo unregister_netdevice_notifier(&taprio_device_notifier);
21575a781ccbSVinicius Costa Gomes }
21585a781ccbSVinicius Costa Gomes
21595a781ccbSVinicius Costa Gomes module_init(taprio_module_init);
21605a781ccbSVinicius Costa Gomes module_exit(taprio_module_exit);
21615a781ccbSVinicius Costa Gomes MODULE_LICENSE("GPL");
2162
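/* A standalone userspace sketch (not part of this module) of the
 * byte-time math used by taprio_set_picos_per_byte() above. The link
 * speed and frame length below are illustrative assumptions: speed is
 * in Mbit/s as reported by ethtool, so 1000 Mbit/s yields 8000 ps
 * (8 ns) per byte, and a 1500 byte frame occupies the wire for 12 us.
 */
#include <stdio.h>

#define USEC_PER_SEC 1000000L

int main(void)
{
	long speed = 1000;	/* link speed in Mbit/s */
	long picos_per_byte = (USEC_PER_SEC * 8) / speed;
	long frame_len = 1500;	/* bytes on the wire */

	printf("picos_per_byte = %ld\n", picos_per_byte);
	printf("1500B tx time  = %ld ns\n",
	       (frame_len * picos_per_byte) / 1000);
	return 0;
}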