// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	 Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 *
 */

#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/time.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/tcp.h>

#include "sch_mqprio_lib.h"

static LIST_HEAD(taprio_list);

#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX

struct sched_entry {
	struct list_head list;

	/* The instant that this entry "closes" and the next one
	 * should open; the qdisc makes a best effort to ensure that
	 * no packet leaves after this time.
	 */
	ktime_t close_time;
	ktime_t next_txtime;
	atomic_t budget;
	int index;
	u32 gate_mask;
	u32 interval;
	u8 command;
};

struct sched_gate_list {
	struct rcu_head rcu;
	struct list_head entries;
	size_t num_entries;
	ktime_t cycle_close_time;
	s64 cycle_time;
	s64 cycle_time_extension;
	s64 base_time;
};

struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	u32 flags;
	enum tk_offsets tk_offset;
	int clockid;
	bool offloaded;
	atomic64_t picos_per_byte; /* Using picoseconds because at 10Gbps+
				    * speeds the transmission time is less
				    * than a nanosecond per byte
				    */

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct sched_gate_list __rcu *oper_sched;
	struct sched_gate_list __rcu *admin_sched;
	struct hrtimer advance_timer;
	struct list_head taprio_list;
	u32 max_frm_len[TC_MAX_QUEUE]; /* for the fast path */
	u32 max_sdu[TC_MAX_QUEUE]; /* for dump and offloading */
	u32 txtime_delay;
};

struct __tc_taprio_qopt_offload {
	refcount_t users;
	struct tc_taprio_qopt_offload offload;
};

static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
	if (!sched)
		return KTIME_MAX;

	return ns_to_ktime(sched->base_time);
}

static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)
{
	/* This pairs with WRITE_ONCE() in taprio_parse_clockid() */
	enum tk_offsets tk_offset = READ_ONCE(q->tk_offset);

	switch (tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, tk_offset);
	}
}

static ktime_t taprio_get_time(const struct taprio_sched *q)
{
	return taprio_mono_to_any(q, ktime_get());
}

static void taprio_free_sched_cb(struct rcu_head *head)
{
	struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
	struct sched_entry *entry, *n;

	list_for_each_entry_safe(entry, n, &sched->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	kfree(sched);
}

static void switch_schedules(struct taprio_sched *q,
			     struct sched_gate_list **admin,
			     struct sched_gate_list **oper)
{
	rcu_assign_pointer(q->oper_sched, *admin);
	rcu_assign_pointer(q->admin_sched, NULL);

	if (*oper)
		call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

	*oper = *admin;
	*admin = NULL;
}

/* Get how much time has already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
	ktime_t time_since_sched_start;
	s32 time_elapsed;

	time_since_sched_start = ktime_sub(time, sched->base_time);
	div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

	return time_elapsed;
}
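
/* Worked example (illustrative values, not from the original source):
 * with base_time = 0 and cycle_time = 1,000,000 ns, a query at
 * time = 2,300,000 ns gives time_since_sched_start = 2,300,000 ns and a
 * remainder of 300,000 ns: we are 300 us into the third cycle.
 */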

static ktime_t get_interval_end_time(struct sched_gate_list *sched,
				     struct sched_gate_list *admin,
				     struct sched_entry *entry,
				     ktime_t intv_start)
{
	s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
	ktime_t intv_end, cycle_ext_end, cycle_end;

	cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
	intv_end = ktime_add_ns(intv_start, entry->interval);
	cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

	if (ktime_before(intv_end, cycle_end))
		return intv_end;
	else if (admin && admin != sched &&
		 ktime_after(admin->base_time, cycle_end) &&
		 ktime_before(admin->base_time, cycle_ext_end))
		return admin->base_time;
	else
		return cycle_end;
}
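
/* Illustrative sketch of the three outcomes above (values assumed):
 * with cycle_time = 1 ms, cycle_time_extension = 200 us and an entry
 * interval of 300 us, the interval end is used when it falls before the
 * cycle end; if a new admin schedule starts within the 200 us extension
 * window past the cycle end, the current interval is stretched until
 * admin->base_time; otherwise the interval is truncated at the cycle end.
 */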

static int length_to_duration(struct taprio_sched *q, int len)
{
	return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC);
}
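
/* Worked example (illustrative): at 1 Gbps, picos_per_byte is 8000
 * (8 ns per byte), so for len = 1500 the duration is
 * 1500 * 8000 / PSEC_PER_NSEC = 12,000 ns, matching the 12 us wire time
 * of a 1500 byte frame at that speed.
 */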

/* Returns the entry corresponding to the next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
						  struct Qdisc *sch,
						  struct sched_gate_list *sched,
						  struct sched_gate_list *admin,
						  ktime_t time,
						  ktime_t *interval_start,
						  ktime_t *interval_end,
						  bool validate_interval)
{
	ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
	ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
	struct sched_entry *entry = NULL, *entry_found = NULL;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	bool entry_available = false;
	s32 cycle_elapsed;
	int tc, n;

	tc = netdev_get_prio_tc_map(dev, skb->priority);
	packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

	*interval_start = 0;
	*interval_end = 0;

	if (!sched)
		return NULL;

	cycle = sched->cycle_time;
	cycle_elapsed = get_cycle_time_elapsed(sched, time);
	curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
	cycle_end = ktime_add_ns(curr_intv_end, cycle);

	list_for_each_entry(entry, &sched->entries, list) {
		curr_intv_start = curr_intv_end;
		curr_intv_end = get_interval_end_time(sched, admin, entry,
						      curr_intv_start);

		if (ktime_after(curr_intv_start, cycle_end))
			break;

		if (!(entry->gate_mask & BIT(tc)) ||
		    packet_transmit_time > entry->interval)
			continue;

		txtime = entry->next_txtime;

		if (ktime_before(txtime, time) || validate_interval) {
			transmit_end_time = ktime_add_ns(time, packet_transmit_time);
			if ((ktime_before(curr_intv_start, time) &&
			     ktime_before(transmit_end_time, curr_intv_end)) ||
			    (ktime_after(curr_intv_start, time) && !validate_interval)) {
				entry_found = entry;
				*interval_start = curr_intv_start;
				*interval_end = curr_intv_end;
				break;
			} else if (!entry_available && !validate_interval) {
				/* Here, we are just trying to find out the
				 * first available interval in the next cycle.
				 */
				entry_available = true;
				entry_found = entry;
				*interval_start = ktime_add_ns(curr_intv_start, cycle);
				*interval_end = ktime_add_ns(curr_intv_end, cycle);
			}
		} else if (ktime_before(txtime, earliest_txtime) &&
			   !entry_available) {
			earliest_txtime = txtime;
			entry_found = entry;
			n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
			*interval_start = ktime_add(curr_intv_start, n * cycle);
			*interval_end = ktime_add(curr_intv_end, n * cycle);
		}
	}

	return entry_found;
}

static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t interval_start, interval_end;
	struct sched_entry *entry;

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
				       &interval_start, &interval_end, true);
	rcu_read_unlock();

	return entry;
}

static bool taprio_flags_valid(u32 flags)
{
	/* Make sure no other flag bits are set. */
	if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
		      TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	/* txtime-assist and full offload are mutually exclusive */
	if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
	    (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	return true;
}
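
/* For example, flags = TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
 * TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD is rejected by the second check
 * above, as is any flags value with an unknown bit set.
 */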

/* This returns the tstamp value set by TCP in terms of the qdisc's clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
	unsigned int offset = skb_network_offset(skb);
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	struct ipv6hdr _ipv6h;

	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
	if (!ipv6h)
		return 0;

	if (ipv6h->version == 4) {
		iph = (struct iphdr *)ipv6h;
		offset += iph->ihl * 4;

		/* special-case 6in4 tunnelling, as that is a common way to get
		 * v6 connectivity in the home
		 */
		if (iph->protocol == IPPROTO_IPV6) {
			ipv6h = skb_header_pointer(skb, offset,
						   sizeof(_ipv6h), &_ipv6h);

			if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
				return 0;
		} else if (iph->protocol != IPPROTO_TCP) {
			return 0;
		}
	} else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
		return 0;
	}

	return taprio_mono_to_any(q, skb->skb_mstamp_ns);
}

/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past:
 *    a. If the gate for the traffic class is currently open and the packet
 *       can be transmitted before it closes, schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open
 *       later in the cycle, set the txtime of the packet to the interval
 *       start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully; in that case, schedule the packet in the next open
 *       window.
 */
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
	ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t minimum_time, now, txtime;
	int len, packet_transmit_time;
	struct sched_entry *entry;
	bool sched_changed;

	now = taprio_get_time(q);
	minimum_time = ktime_add_ns(now, q->txtime_delay);

	tcp_tstamp = get_tcp_tstamp(q, skb);
	minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

	rcu_read_lock();
	admin = rcu_dereference(q->admin_sched);
	sched = rcu_dereference(q->oper_sched);
	if (admin && ktime_after(minimum_time, admin->base_time))
		switch_schedules(q, &admin, &sched);

	/* Until the schedule starts, all the queues are open */
	if (!sched || ktime_before(minimum_time, sched->base_time)) {
		txtime = minimum_time;
		goto done;
	}

	len = qdisc_pkt_len(skb);
	packet_transmit_time = length_to_duration(q, len);

	do {
		sched_changed = false;

		entry = find_entry_to_transmit(skb, sch, sched, admin,
					       minimum_time,
					       &interval_start, &interval_end,
					       false);
		if (!entry) {
			txtime = 0;
			goto done;
		}

		txtime = entry->next_txtime;
		txtime = max_t(ktime_t, txtime, minimum_time);
		txtime = max_t(ktime_t, txtime, interval_start);

		if (admin && admin != sched &&
		    ktime_after(txtime, admin->base_time)) {
			sched = admin;
			sched_changed = true;
			continue;
		}

		transmit_end_time = ktime_add(txtime, packet_transmit_time);
		minimum_time = transmit_end_time;

		/* Update the txtime of the current entry to the next time its
		 * interval starts.
		 */
		if (ktime_after(transmit_end_time, interval_end))
			entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
	} while (sched_changed || ktime_after(transmit_end_time, interval_end));

	entry->next_txtime = transmit_end_time;

done:
	rcu_read_unlock();
	return txtime;
}
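
/* Illustrative walk-through of the loop above (timings assumed): suppose
 * entry->next_txtime is in the past and the matching gate opens at
 * interval_start = now + 50 us; txtime is then clamped up to
 * interval_start. If txtime + packet_transmit_time still exceeds
 * interval_end, next_txtime is pushed one cycle ahead and the search
 * repeats; otherwise next_txtime becomes the end of this transmission,
 * which serializes later packets of the same traffic class.
 */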

static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
			      struct Qdisc *child, struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int prio = skb->priority;
	u8 tc;

	/* sk_flags are only safe to use on full sockets. */
	if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
		if (!is_valid_interval(skb, sch))
			return qdisc_drop(skb, sch, to_free);
	} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		skb->tstamp = get_packet_txtime(skb, sch);
		if (!skb->tstamp)
			return qdisc_drop(skb, sch, to_free);
	}

	/* Devices with full offload are expected to honor this in hardware */
	tc = netdev_get_prio_tc_map(dev, prio);
	if (skb->len > q->max_frm_len[tc])
		return qdisc_drop(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return qdisc_enqueue(skb, child, to_free);
}

/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct Qdisc *child;
	int queue;

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	/* Large packets might not be transmitted when the transmission duration
	 * exceeds any configured interval. Therefore, segment the skb into
	 * smaller chunks. Drivers with full offload are expected to handle
	 * this in hardware.
	 */
	if (skb_is_gso(skb)) {
		unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
		netdev_features_t features = netif_skb_features(skb);
		struct sk_buff *segs, *nskb;
		int ret;

		segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
		if (IS_ERR_OR_NULL(segs))
			return qdisc_drop(skb, sch, to_free);

		skb_list_walk_safe(segs, segs, nskb) {
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			slen += segs->len;

			ret = taprio_enqueue_one(segs, sch, child, to_free);
			if (ret != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(ret))
					qdisc_qstats_drop(sch);
			} else {
				numsegs++;
			}
		}

		if (numsegs > 1)
			qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
		consume_skb(skb);

		return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
	}

	return taprio_enqueue_one(skb, sch, child, to_free);
}
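
/* Accounting sketch for the GSO path above: one skb of length "len" was
 * replaced by "numsegs" segments totalling "slen" bytes, so
 * qdisc_tree_reduce_backlog() is asked to reduce the backlog by
 * (1 - numsegs) packets and (len - slen) bytes, i.e. to grow it by the
 * extra segments and the headers duplicated into each of them.
 */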

static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	WARN_ONCE(1, "taprio only supports operating as root qdisc, peek() not implemented");
	return NULL;
}

static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
	atomic_set(&entry->budget,
		   div64_u64((u64)entry->interval * PSEC_PER_NSEC,
			     atomic64_read(&q->picos_per_byte)));
}
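
/* Worked example (illustrative): with an entry interval of 100,000 ns
 * and picos_per_byte = 8000 (1 Gbps), the budget is
 * 100,000 * 1000 / 8000 = 12,500 bytes, the amount of data that fits on
 * the wire within that interval.
 */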

/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb = NULL;
	struct sched_entry *entry;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	/* If there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open. This is in
	 * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 * "AdminGateStates".
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;

	if (!gate_mask)
		goto done;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		ktime_t guard;
		int prio;
		int len;
		u8 tc;

		if (unlikely(!child))
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			skb = child->ops->dequeue(child);
			if (!skb)
				continue;
			goto skb_found;
		}

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc))) {
			skb = NULL;
			continue;
		}

		len = qdisc_pkt_len(skb);
		guard = ktime_add_ns(taprio_get_time(q),
				     length_to_duration(q, len));

		/* In the case that there's no gate entry, there's no
		 * guard band ...
		 */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    ktime_after(guard, entry->close_time)) {
			skb = NULL;
			continue;
		}

		/* ... and no budget. */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    atomic_sub_return(len, &entry->budget) < 0) {
			skb = NULL;
			continue;
		}

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			goto done;

skb_found:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		goto done;
	}

done:
	rcu_read_unlock();

	return skb;
}
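
/* Guard band example (illustrative): with 12 us left until
 * entry->close_time and picos_per_byte = 8000, a 1500 byte skb (12 us of
 * wire time) can still be sent, while a 1518 byte one would make "guard"
 * land past close_time, so its TXQ is skipped until a later gate opening.
 */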

static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{
	if (list_is_last(&entry->list, &oper->entries))
		return true;

	if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
		return true;

	return false;
}

static bool should_change_schedules(const struct sched_gate_list *admin,
				    const struct sched_gate_list *oper,
				    ktime_t close_time)
{
	ktime_t next_base_time, extension_time;

	if (!admin)
		return false;

	next_base_time = sched_base_time(admin);

	/* This is the simple case: the close_time falls at or after
	 * the next schedule's base_time.
	 */
	if (ktime_compare(next_base_time, close_time) <= 0)
		return true;

	/* This is the cycle_time_extension case: if the close_time
	 * plus the amount by which the cycle can be extended would
	 * fall after the next schedule's base_time, we can extend the
	 * current schedule by that amount.
	 */
	extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);

	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
	 * how precisely the extension should be made. So after
	 * conformance testing, this logic may change.
	 */
	if (ktime_compare(next_base_time, extension_time) <= 0)
		return true;

	return false;
}

static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct sched_gate_list *oper, *admin;
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;
	ktime_t close_time;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));
	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	if (!oper)
		switch_schedules(q, &admin, &oper);

	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. the previous schedule just ended. The first
	 * entry of each schedule is pre-calculated during schedule
	 * initialization.
	 */
	if (unlikely(!entry || entry->close_time == oper->base_time)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		close_time = next->close_time;
		goto first_run;
	}

	if (should_restart_cycle(oper, entry)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
						      oper->cycle_time);
	} else {
		next = list_next_entry(entry, list);
	}

	close_time = ktime_add_ns(entry->close_time, next->interval);
	close_time = min_t(ktime_t, close_time, oper->cycle_close_time);

	if (should_change_schedules(admin, oper, close_time)) {
		/* Set things so the next time this runs, the new
		 * schedule runs.
		 */
		close_time = sched_base_time(admin);
		switch_schedules(q, &admin, &oper);
	}

	next->close_time = close_time;
	taprio_set_budget(q, next);

first_run:
	rcu_assign_pointer(q->current_entry, next);
	spin_unlock(&q->current_entry_lock);

	hrtimer_set_expires(&q->advance_timer, close_time);

	rcu_read_lock();
	__netif_schedule(sch);
	rcu_read_unlock();

	return HRTIMER_RESTART;
}
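
/* Timer cadence sketch (illustrative): with two entries of 300 us and
 * 700 us and cycle_time = 1 ms, the timer fires at base + 300 us
 * (advance to the second entry), base + 1 ms (cycle restart, back to the
 * first entry), base + 1.3 ms, and so on, with close_time clamped to
 * cycle_close_time at every cycle boundary.
 */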

static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY_INDEX]	   = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_CMD]	   = { .type = NLA_U8 },
	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
};

static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_TC_ENTRY_INDEX]	   = { .type = NLA_U32 },
	[TCA_TAPRIO_TC_ENTRY_MAX_SDU]	   = { .type = NLA_U32 },
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
	[TCA_TAPRIO_ATTR_PRIOMAP]	       = {
		.len = sizeof(struct tc_mqprio_qopt)
	},
	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]           = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]            = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]         = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_FLAGS]                      = { .type = NLA_U32 },
	[TCA_TAPRIO_ATTR_TXTIME_DELAY]		     = { .type = NLA_U32 },
	[TCA_TAPRIO_ATTR_TC_ENTRY]		     = { .type = NLA_NESTED },
};

static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
			    struct sched_entry *entry,
			    struct netlink_ext_ack *extack)
{
	int min_duration = length_to_duration(q, ETH_ZLEN);
	u32 interval = 0;

	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
		entry->command = nla_get_u8(
			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
		entry->gate_mask = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
		interval = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

	/* The interval should allow at least the minimum Ethernet
	 * frame to go out.
	 */
	if (interval < min_duration) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	return 0;
}

static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
			     struct sched_entry *entry, int index,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
					  entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_sched_entry(q, tb, entry, extack);
}

static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
			    struct sched_gate_list *sched,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list)
		return -EINVAL;

	nla_for_each_nested(n, list, rem) {
		struct sched_entry *entry;

		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			return -ENOMEM;
		}

		err = parse_sched_entry(q, n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			return err;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;
}

static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
				 struct sched_gate_list *new,
				 struct netlink_ext_ack *extack)
{
	int err = 0;

	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
		NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
		return -ENOTSUPP;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
		err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
				       new, extack);
	if (err < 0)
		return err;

	if (!new->cycle_time) {
		struct sched_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &new->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);

		if (!cycle) {
			NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
			return -EINVAL;
		}

		new->cycle_time = cycle;
	}

	return 0;
}
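
/* Example of the implicit cycle time computed above: a schedule with
 * entries of 300 us, 300 us and 400 us and no
 * TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME attribute gets
 * cycle_time = 1,000,000 ns, the sum of its intervals.
 */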

static int taprio_parse_mqprio_opt(struct net_device *dev,
				   struct tc_mqprio_qopt *qopt,
				   struct netlink_ext_ack *extack,
				   u32 taprio_flags)
{
	bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags);

	if (!qopt && !dev->num_tc) {
		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
		return -EINVAL;
	}

	/* If num_tc is already set, it means that the user already
	 * configured the mqprio part
	 */
	if (dev->num_tc)
		return 0;

	/* taprio imposes that traffic classes map 1:n to tx queues */
	if (qopt->num_tc > dev->num_tx_queues) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
		return -EINVAL;
	}

	/* For some reason, in txtime-assist mode, we allow TXQ ranges for
	 * different TCs to overlap, and just validate the TXQ ranges.
	 */
	return mqprio_validate_qopt(dev, qopt, true, allow_overlapping_txqs,
				    extack);
}

static int taprio_get_start_time(struct Qdisc *sch,
				 struct sched_gate_list *sched,
				 ktime_t *start)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t now, base, cycle;
	s64 n;

	base = sched_base_time(sched);
	now = taprio_get_time(q);

	if (ktime_after(base, now)) {
		*start = base;
		return 0;
	}

	cycle = sched->cycle_time;

	/* The qdisc is expected to have at least one sched_entry. Moreover,
	 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
	 * something went really wrong. In that case, we should warn about this
	 * inconsistent state and return an error.
	 */
	if (WARN_ON(!cycle))
		return -EFAULT;

	/* Schedule the start time for the beginning of the next
	 * cycle.
	 */
	n = div64_s64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
	return 0;
}
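
/* Worked example (illustrative): with base = 1,000,000,000 ns,
 * now = 2,500,000,000 ns and cycle = 1,000,000,000 ns, n = 1 and the
 * schedule is (re)started at base + 2 * cycle = 3,000,000,000 ns, the
 * first cycle boundary after "now".
 */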

static void setup_first_close_time(struct taprio_sched *q,
				   struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *first;
	ktime_t cycle;

	first = list_first_entry(&sched->entries,
				 struct sched_entry, list);

	cycle = sched->cycle_time;

	/* FIXME: find a better place to do this */
	sched->cycle_close_time = ktime_add_ns(base, cycle);

	first->close_time = ktime_add_ns(base, first->interval);
	taprio_set_budget(q, first);
	rcu_assign_pointer(q->current_entry, NULL);
}

static void taprio_start_sched(struct Qdisc *sch,
			       ktime_t start, struct sched_gate_list *new)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t expires;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		return;

	expires = hrtimer_get_expires(&q->advance_timer);
	if (expires == 0)
		expires = KTIME_MAX;

	/* If the new schedule starts before the next expiration, we
	 * reprogram it to the earliest one, so we change the admin
	 * schedule to the operational one at the right time.
	 */
	start = min_t(ktime_t, start, expires);

	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}
9885a781ccbSVinicius Costa Gomes 
9897b9eba7bSLeandro Dorileo static void taprio_set_picos_per_byte(struct net_device *dev,
9907b9eba7bSLeandro Dorileo 				      struct taprio_sched *q)
9917b9eba7bSLeandro Dorileo {
9927b9eba7bSLeandro Dorileo 	struct ethtool_link_ksettings ecmd;
993f04b514cSVladimir Oltean 	int speed = SPEED_10;
994f04b514cSVladimir Oltean 	int picos_per_byte;
995f04b514cSVladimir Oltean 	int err;
9967b9eba7bSLeandro Dorileo 
997f04b514cSVladimir Oltean 	err = __ethtool_get_link_ksettings(dev, &ecmd);
998f04b514cSVladimir Oltean 	if (err < 0)
999f04b514cSVladimir Oltean 		goto skip;
1000f04b514cSVladimir Oltean 
10019a9251a3SVladimir Oltean 	if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
1002f04b514cSVladimir Oltean 		speed = ecmd.base.speed;
1003f04b514cSVladimir Oltean 
1004f04b514cSVladimir Oltean skip:
100568ce6688SVladimir Oltean 	picos_per_byte = (USEC_PER_SEC * 8) / speed;
10067b9eba7bSLeandro Dorileo 
10077b9eba7bSLeandro Dorileo 	atomic64_set(&q->picos_per_byte, picos_per_byte);
10087b9eba7bSLeandro Dorileo 	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
10097b9eba7bSLeandro Dorileo 		   dev->name, (long long)atomic64_read(&q->picos_per_byte),
10107b9eba7bSLeandro Dorileo 		   ecmd.base.speed);
10117b9eba7bSLeandro Dorileo }
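
/* Worked example (illustrative): ecmd.base.speed is in Mbit/s, so one byte
 * (8 bits) takes 8 / (speed * 10^6) seconds on the wire, which is exactly
 * (USEC_PER_SEC * 8) / speed picoseconds. At SPEED_1000 that is
 * 8000000 / 1000 = 8000 ps per byte (8 ns); with the SPEED_10 fallback it
 * is 8000000 / 10 = 800000 ps per byte.
 */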
10127b9eba7bSLeandro Dorileo 
10137b9eba7bSLeandro Dorileo static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
10147b9eba7bSLeandro Dorileo 			       void *ptr)
10157b9eba7bSLeandro Dorileo {
10167b9eba7bSLeandro Dorileo 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
10177b9eba7bSLeandro Dorileo 	struct taprio_sched *q;
10187b9eba7bSLeandro Dorileo 
10197b9eba7bSLeandro Dorileo 	ASSERT_RTNL();
10207b9eba7bSLeandro Dorileo 
10217b9eba7bSLeandro Dorileo 	if (event != NETDEV_UP && event != NETDEV_CHANGE)
10227b9eba7bSLeandro Dorileo 		return NOTIFY_DONE;
10237b9eba7bSLeandro Dorileo 
10247b9eba7bSLeandro Dorileo 	list_for_each_entry(q, &taprio_list, taprio_list) {
1025fc4f2fd0SVladimir Oltean 		if (dev != qdisc_dev(q->root))
1026fc4f2fd0SVladimir Oltean 			continue;
1027fc4f2fd0SVladimir Oltean 
1028fc4f2fd0SVladimir Oltean 		taprio_set_picos_per_byte(dev, q);
10297b9eba7bSLeandro Dorileo 		break;
10307b9eba7bSLeandro Dorileo 	}
10317b9eba7bSLeandro Dorileo 
10327b9eba7bSLeandro Dorileo 	return NOTIFY_DONE;
10337b9eba7bSLeandro Dorileo }
10347b9eba7bSLeandro Dorileo 
10354cfd5779SVedang Patel static void setup_txtime(struct taprio_sched *q,
10364cfd5779SVedang Patel 			 struct sched_gate_list *sched, ktime_t base)
10374cfd5779SVedang Patel {
10384cfd5779SVedang Patel 	struct sched_entry *entry;
10394cfd5779SVedang Patel 	u32 interval = 0;
10404cfd5779SVedang Patel 
10414cfd5779SVedang Patel 	list_for_each_entry(entry, &sched->entries, list) {
10424cfd5779SVedang Patel 		entry->next_txtime = ktime_add_ns(base, interval);
10434cfd5779SVedang Patel 		interval += entry->interval;
10444cfd5779SVedang Patel 	}
10454cfd5779SVedang Patel }
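
/* Worked example (illustrative): for a schedule whose three entries have
 * intervals of 300000, 300000 and 400000 ns, the loop above sets
 * next_txtime to base, base + 300000 and base + 600000 respectively, i.e.
 * each entry's first transmission time is offset from 'base' by the sum of
 * the intervals of all entries preceding it.
 */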
10464cfd5779SVedang Patel 
10479c66d156SVinicius Costa Gomes static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
10489c66d156SVinicius Costa Gomes {
10499c66d156SVinicius Costa Gomes 	struct __tc_taprio_qopt_offload *__offload;
10509c66d156SVinicius Costa Gomes 
105111a33de2SGustavo A. R. Silva 	__offload = kzalloc(struct_size(__offload, offload.entries, num_entries),
105211a33de2SGustavo A. R. Silva 			    GFP_KERNEL);
10539c66d156SVinicius Costa Gomes 	if (!__offload)
10549c66d156SVinicius Costa Gomes 		return NULL;
10559c66d156SVinicius Costa Gomes 
10569c66d156SVinicius Costa Gomes 	refcount_set(&__offload->users, 1);
10579c66d156SVinicius Costa Gomes 
10589c66d156SVinicius Costa Gomes 	return &__offload->offload;
10599c66d156SVinicius Costa Gomes }
10609c66d156SVinicius Costa Gomes 
10619c66d156SVinicius Costa Gomes struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
10629c66d156SVinicius Costa Gomes 						  *offload)
10639c66d156SVinicius Costa Gomes {
10649c66d156SVinicius Costa Gomes 	struct __tc_taprio_qopt_offload *__offload;
10659c66d156SVinicius Costa Gomes 
10669c66d156SVinicius Costa Gomes 	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
10679c66d156SVinicius Costa Gomes 				 offload);
10689c66d156SVinicius Costa Gomes 
10699c66d156SVinicius Costa Gomes 	refcount_inc(&__offload->users);
10709c66d156SVinicius Costa Gomes 
10719c66d156SVinicius Costa Gomes 	return offload;
10729c66d156SVinicius Costa Gomes }
10739c66d156SVinicius Costa Gomes EXPORT_SYMBOL_GPL(taprio_offload_get);
10749c66d156SVinicius Costa Gomes 
10759c66d156SVinicius Costa Gomes void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
10769c66d156SVinicius Costa Gomes {
10779c66d156SVinicius Costa Gomes 	struct __tc_taprio_qopt_offload *__offload;
10789c66d156SVinicius Costa Gomes 
10799c66d156SVinicius Costa Gomes 	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
10809c66d156SVinicius Costa Gomes 				 offload);
10819c66d156SVinicius Costa Gomes 
10829c66d156SVinicius Costa Gomes 	if (!refcount_dec_and_test(&__offload->users))
10839c66d156SVinicius Costa Gomes 		return;
10849c66d156SVinicius Costa Gomes 
10859c66d156SVinicius Costa Gomes 	kfree(__offload);
10869c66d156SVinicius Costa Gomes }
10879c66d156SVinicius Costa Gomes EXPORT_SYMBOL_GPL(taprio_offload_free);
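
/* Sketch (hypothetical driver code; 'priv' is an assumed driver-private
 * structure, not something defined here) of how the two exported helpers
 * above are meant to be paired by an offloading driver:
 *
 *	// in the TC_SETUP_QDISC_TAPRIO handler, keep the schedule around:
 *	priv->taprio = taprio_offload_get(offload);
 *	...
 *	// once the hardware no longer needs it, drop the reference:
 *	taprio_offload_free(priv->taprio);
 */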
10889c66d156SVinicius Costa Gomes 
10899c66d156SVinicius Costa Gomes /* This function only serves to keep the pointers to the "oper" and "admin"
10909c66d156SVinicius Costa Gomes  * schedules valid in relation to their base times, so that when dump() is
10919c66d156SVinicius Costa Gomes  * called, the user looks at the right schedules.
10929c66d156SVinicius Costa Gomes  * When using full offload, the admin configuration is promoted to oper at the
10939c66d156SVinicius Costa Gomes  * base_time in the PHC time domain.  But because the system time is not
10949c66d156SVinicius Costa Gomes  * necessarily in sync with that, we can't just trigger a hrtimer to call
10959c66d156SVinicius Costa Gomes  * switch_schedules at the right hardware time.
10969c66d156SVinicius Costa Gomes  * At the moment we call this by hand right away from taprio, but in the future
10979c66d156SVinicius Costa Gomes  * it will be useful to create a mechanism for drivers to notify taprio of the
10989c66d156SVinicius Costa Gomes  * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
10999c66d156SVinicius Costa Gomes  * This is left as TODO.
11009c66d156SVinicius Costa Gomes  */
1101d665c128SYi Wang static void taprio_offload_config_changed(struct taprio_sched *q)
11029c66d156SVinicius Costa Gomes {
11039c66d156SVinicius Costa Gomes 	struct sched_gate_list *oper, *admin;
11049c66d156SVinicius Costa Gomes 
1105c8cbe123SVladimir Oltean 	oper = rtnl_dereference(q->oper_sched);
1106c8cbe123SVladimir Oltean 	admin = rtnl_dereference(q->admin_sched);
11079c66d156SVinicius Costa Gomes 
11089c66d156SVinicius Costa Gomes 	switch_schedules(q, &admin, &oper);
11099c66d156SVinicius Costa Gomes }
11109c66d156SVinicius Costa Gomes 
111109e31cf0SVinicius Costa Gomes static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
111209e31cf0SVinicius Costa Gomes {
111309e31cf0SVinicius Costa Gomes 	u32 i, queue_mask = 0;
111409e31cf0SVinicius Costa Gomes 
111509e31cf0SVinicius Costa Gomes 	for (i = 0; i < dev->num_tc; i++) {
111609e31cf0SVinicius Costa Gomes 		u32 offset, count;
111709e31cf0SVinicius Costa Gomes 
111809e31cf0SVinicius Costa Gomes 		if (!(tc_mask & BIT(i)))
111909e31cf0SVinicius Costa Gomes 			continue;
112009e31cf0SVinicius Costa Gomes 
112109e31cf0SVinicius Costa Gomes 		offset = dev->tc_to_txq[i].offset;
112209e31cf0SVinicius Costa Gomes 		count = dev->tc_to_txq[i].count;
112309e31cf0SVinicius Costa Gomes 
112409e31cf0SVinicius Costa Gomes 		queue_mask |= GENMASK(offset + count - 1, offset);
112509e31cf0SVinicius Costa Gomes 	}
112609e31cf0SVinicius Costa Gomes 
112709e31cf0SVinicius Costa Gomes 	return queue_mask;
112809e31cf0SVinicius Costa Gomes }
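
/* Worked example (illustrative): with a "queues 1@0 2@1" mapping (TC 0 is
 * TX queue 0, TC 1 is TX queues 1-2), a tc_mask of BIT(1) = 0b010 yields
 * queue_mask = GENMASK(1 + 2 - 1, 1) = 0b110, i.e. TX queues 1 and 2.
 */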
112909e31cf0SVinicius Costa Gomes 
113009e31cf0SVinicius Costa Gomes static void taprio_sched_to_offload(struct net_device *dev,
11319c66d156SVinicius Costa Gomes 				    struct sched_gate_list *sched,
1132522d15eaSVladimir Oltean 				    struct tc_taprio_qopt_offload *offload,
1133522d15eaSVladimir Oltean 				    const struct tc_taprio_caps *caps)
11349c66d156SVinicius Costa Gomes {
11359c66d156SVinicius Costa Gomes 	struct sched_entry *entry;
11369c66d156SVinicius Costa Gomes 	int i = 0;
11379c66d156SVinicius Costa Gomes 
11389c66d156SVinicius Costa Gomes 	offload->base_time = sched->base_time;
11399c66d156SVinicius Costa Gomes 	offload->cycle_time = sched->cycle_time;
11409c66d156SVinicius Costa Gomes 	offload->cycle_time_extension = sched->cycle_time_extension;
11419c66d156SVinicius Costa Gomes 
11429c66d156SVinicius Costa Gomes 	list_for_each_entry(entry, &sched->entries, list) {
11439c66d156SVinicius Costa Gomes 		struct tc_taprio_sched_entry *e = &offload->entries[i];
11449c66d156SVinicius Costa Gomes 
11459c66d156SVinicius Costa Gomes 		e->command = entry->command;
11469c66d156SVinicius Costa Gomes 		e->interval = entry->interval;
1147522d15eaSVladimir Oltean 		if (caps->gate_mask_per_txq)
1148522d15eaSVladimir Oltean 			e->gate_mask = tc_map_to_queue_mask(dev,
1149522d15eaSVladimir Oltean 							    entry->gate_mask);
1150522d15eaSVladimir Oltean 		else
1151522d15eaSVladimir Oltean 			e->gate_mask = entry->gate_mask;
115209e31cf0SVinicius Costa Gomes 
11539c66d156SVinicius Costa Gomes 		i++;
11549c66d156SVinicius Costa Gomes 	}
11559c66d156SVinicius Costa Gomes 
11569c66d156SVinicius Costa Gomes 	offload->num_entries = i;
11579c66d156SVinicius Costa Gomes }
11589c66d156SVinicius Costa Gomes 
11599c66d156SVinicius Costa Gomes static int taprio_enable_offload(struct net_device *dev,
11609c66d156SVinicius Costa Gomes 				 struct taprio_sched *q,
11619c66d156SVinicius Costa Gomes 				 struct sched_gate_list *sched,
11629c66d156SVinicius Costa Gomes 				 struct netlink_ext_ack *extack)
11639c66d156SVinicius Costa Gomes {
11649c66d156SVinicius Costa Gomes 	const struct net_device_ops *ops = dev->netdev_ops;
11659c66d156SVinicius Costa Gomes 	struct tc_taprio_qopt_offload *offload;
1166a54fc09eSVladimir Oltean 	struct tc_taprio_caps caps;
1167a54fc09eSVladimir Oltean 	int tc, err = 0;
11689c66d156SVinicius Costa Gomes 
11699c66d156SVinicius Costa Gomes 	if (!ops->ndo_setup_tc) {
11709c66d156SVinicius Costa Gomes 		NL_SET_ERR_MSG(extack,
11719c66d156SVinicius Costa Gomes 			       "Device does not support taprio offload");
11729c66d156SVinicius Costa Gomes 		return -EOPNOTSUPP;
11739c66d156SVinicius Costa Gomes 	}
11749c66d156SVinicius Costa Gomes 
1175a54fc09eSVladimir Oltean 	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
1176a54fc09eSVladimir Oltean 				 &caps, sizeof(caps));
1177a54fc09eSVladimir Oltean 
1178a54fc09eSVladimir Oltean 	if (!caps.supports_queue_max_sdu) {
1179a54fc09eSVladimir Oltean 		for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
1180a54fc09eSVladimir Oltean 			if (q->max_sdu[tc]) {
1181a54fc09eSVladimir Oltean 				NL_SET_ERR_MSG_MOD(extack,
1182a54fc09eSVladimir Oltean 						   "Device does not handle queueMaxSDU");
1183a54fc09eSVladimir Oltean 				return -EOPNOTSUPP;
1184a54fc09eSVladimir Oltean 			}
1185a54fc09eSVladimir Oltean 		}
1186a54fc09eSVladimir Oltean 	}
1187a54fc09eSVladimir Oltean 
11889c66d156SVinicius Costa Gomes 	offload = taprio_offload_alloc(sched->num_entries);
11899c66d156SVinicius Costa Gomes 	if (!offload) {
11909c66d156SVinicius Costa Gomes 		NL_SET_ERR_MSG(extack,
11919c66d156SVinicius Costa Gomes 			       "Not enough memory for enabling offload mode");
11929c66d156SVinicius Costa Gomes 		return -ENOMEM;
11939c66d156SVinicius Costa Gomes 	}
11949c66d156SVinicius Costa Gomes 	offload->enable = 1;
119509c794c0SVladimir Oltean 	mqprio_qopt_reconstruct(dev, &offload->mqprio.qopt);
1196522d15eaSVladimir Oltean 	taprio_sched_to_offload(dev, sched, offload, &caps);
11979c66d156SVinicius Costa Gomes 
1198a54fc09eSVladimir Oltean 	for (tc = 0; tc < TC_MAX_QUEUE; tc++)
1199a54fc09eSVladimir Oltean 		offload->max_sdu[tc] = q->max_sdu[tc];
1200a54fc09eSVladimir Oltean 
12019c66d156SVinicius Costa Gomes 	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
12029c66d156SVinicius Costa Gomes 	if (err < 0) {
12039c66d156SVinicius Costa Gomes 		NL_SET_ERR_MSG(extack,
12049c66d156SVinicius Costa Gomes 			       "Device failed to setup taprio offload");
12059c66d156SVinicius Costa Gomes 		goto done;
12069c66d156SVinicius Costa Gomes 	}
12079c66d156SVinicius Costa Gomes 
1208db46e3a8SVladimir Oltean 	q->offloaded = true;
1209db46e3a8SVladimir Oltean 
12109c66d156SVinicius Costa Gomes done:
12119c66d156SVinicius Costa Gomes 	taprio_offload_free(offload);
12129c66d156SVinicius Costa Gomes 
12139c66d156SVinicius Costa Gomes 	return err;
12149c66d156SVinicius Costa Gomes }
12159c66d156SVinicius Costa Gomes 
12169c66d156SVinicius Costa Gomes static int taprio_disable_offload(struct net_device *dev,
12179c66d156SVinicius Costa Gomes 				  struct taprio_sched *q,
12189c66d156SVinicius Costa Gomes 				  struct netlink_ext_ack *extack)
12199c66d156SVinicius Costa Gomes {
12209c66d156SVinicius Costa Gomes 	const struct net_device_ops *ops = dev->netdev_ops;
12219c66d156SVinicius Costa Gomes 	struct tc_taprio_qopt_offload *offload;
12229c66d156SVinicius Costa Gomes 	int err;
12239c66d156SVinicius Costa Gomes 
1224db46e3a8SVladimir Oltean 	if (!q->offloaded)
12259c66d156SVinicius Costa Gomes 		return 0;
12269c66d156SVinicius Costa Gomes 
12279c66d156SVinicius Costa Gomes 	offload = taprio_offload_alloc(0);
12289c66d156SVinicius Costa Gomes 	if (!offload) {
12299c66d156SVinicius Costa Gomes 		NL_SET_ERR_MSG(extack,
12309c66d156SVinicius Costa Gomes 			       "Not enough memory to disable offload mode");
12319c66d156SVinicius Costa Gomes 		return -ENOMEM;
12329c66d156SVinicius Costa Gomes 	}
12339c66d156SVinicius Costa Gomes 	offload->enable = 0;
12349c66d156SVinicius Costa Gomes 
12359c66d156SVinicius Costa Gomes 	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
12369c66d156SVinicius Costa Gomes 	if (err < 0) {
12379c66d156SVinicius Costa Gomes 		NL_SET_ERR_MSG(extack,
12389c66d156SVinicius Costa Gomes 			       "Device failed to disable offload");
12399c66d156SVinicius Costa Gomes 		goto out;
12409c66d156SVinicius Costa Gomes 	}
12419c66d156SVinicius Costa Gomes 
1242db46e3a8SVladimir Oltean 	q->offloaded = false;
1243db46e3a8SVladimir Oltean 
12449c66d156SVinicius Costa Gomes out:
12459c66d156SVinicius Costa Gomes 	taprio_offload_free(offload);
12469c66d156SVinicius Costa Gomes 
12479c66d156SVinicius Costa Gomes 	return err;
12489c66d156SVinicius Costa Gomes }
12499c66d156SVinicius Costa Gomes 
12509c66d156SVinicius Costa Gomes /* If full offload is enabled, the only possible clockid is the net device's
12519c66d156SVinicius Costa Gomes  * PHC. For that reason, specifying a clockid through netlink is incorrect.
12529c66d156SVinicius Costa Gomes  * For txtime-assist, it is implicitly assumed that the device's PHC is kept
12539c66d156SVinicius Costa Gomes  * in sync with the specified clockid via a user space daemon such as phc2sys.
12549c66d156SVinicius Costa Gomes  * For both software taprio and txtime-assist, the clockid is used for the
12559c66d156SVinicius Costa Gomes  * hrtimer that advances the schedule, and is hence mandatory.
12569c66d156SVinicius Costa Gomes  */
12579c66d156SVinicius Costa Gomes static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
12589c66d156SVinicius Costa Gomes 				struct netlink_ext_ack *extack)
12599c66d156SVinicius Costa Gomes {
12609c66d156SVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
12619c66d156SVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
12629c66d156SVinicius Costa Gomes 	int err = -EINVAL;
12639c66d156SVinicius Costa Gomes 
12649c66d156SVinicius Costa Gomes 	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
12659c66d156SVinicius Costa Gomes 		const struct ethtool_ops *ops = dev->ethtool_ops;
12669c66d156SVinicius Costa Gomes 		struct ethtool_ts_info info = {
12679c66d156SVinicius Costa Gomes 			.cmd = ETHTOOL_GET_TS_INFO,
12689c66d156SVinicius Costa Gomes 			.phc_index = -1,
12699c66d156SVinicius Costa Gomes 		};
12709c66d156SVinicius Costa Gomes 
12719c66d156SVinicius Costa Gomes 		if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
12729c66d156SVinicius Costa Gomes 			NL_SET_ERR_MSG(extack,
12739c66d156SVinicius Costa Gomes 				       "The 'clockid' cannot be specified for full offload");
12749c66d156SVinicius Costa Gomes 			goto out;
12759c66d156SVinicius Costa Gomes 		}
12769c66d156SVinicius Costa Gomes 
12779c66d156SVinicius Costa Gomes 		if (ops && ops->get_ts_info)
12789c66d156SVinicius Costa Gomes 			err = ops->get_ts_info(dev, &info);
12799c66d156SVinicius Costa Gomes 
12809c66d156SVinicius Costa Gomes 		if (err || info.phc_index < 0) {
12819c66d156SVinicius Costa Gomes 			NL_SET_ERR_MSG(extack,
12829c66d156SVinicius Costa Gomes 				       "Device does not have a PTP clock");
12839c66d156SVinicius Costa Gomes 			err = -ENOTSUPP;
12849c66d156SVinicius Costa Gomes 			goto out;
12859c66d156SVinicius Costa Gomes 		}
12869c66d156SVinicius Costa Gomes 	} else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
12879c66d156SVinicius Costa Gomes 		int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
12886dc25401SEric Dumazet 		enum tk_offsets tk_offset;
12899c66d156SVinicius Costa Gomes 
12909c66d156SVinicius Costa Gomes 		/* We only support static clockids and we don't allow
12919c66d156SVinicius Costa Gomes 		 * for it to be modified after the first init.
12929c66d156SVinicius Costa Gomes 		 */
12939c66d156SVinicius Costa Gomes 		if (clockid < 0 ||
12949c66d156SVinicius Costa Gomes 		    (q->clockid != -1 && q->clockid != clockid)) {
12959c66d156SVinicius Costa Gomes 			NL_SET_ERR_MSG(extack,
12969c66d156SVinicius Costa Gomes 				       "Changing the 'clockid' of a running schedule is not supported");
12979c66d156SVinicius Costa Gomes 			err = -ENOTSUPP;
12989c66d156SVinicius Costa Gomes 			goto out;
12999c66d156SVinicius Costa Gomes 		}
13009c66d156SVinicius Costa Gomes 
13019c66d156SVinicius Costa Gomes 		switch (clockid) {
13029c66d156SVinicius Costa Gomes 		case CLOCK_REALTIME:
13036dc25401SEric Dumazet 			tk_offset = TK_OFFS_REAL;
13049c66d156SVinicius Costa Gomes 			break;
13059c66d156SVinicius Costa Gomes 		case CLOCK_MONOTONIC:
13066dc25401SEric Dumazet 			tk_offset = TK_OFFS_MAX;
13079c66d156SVinicius Costa Gomes 			break;
13089c66d156SVinicius Costa Gomes 		case CLOCK_BOOTTIME:
13096dc25401SEric Dumazet 			tk_offset = TK_OFFS_BOOT;
13109c66d156SVinicius Costa Gomes 			break;
13119c66d156SVinicius Costa Gomes 		case CLOCK_TAI:
13126dc25401SEric Dumazet 			tk_offset = TK_OFFS_TAI;
13139c66d156SVinicius Costa Gomes 			break;
13149c66d156SVinicius Costa Gomes 		default:
13159c66d156SVinicius Costa Gomes 			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
13169c66d156SVinicius Costa Gomes 			err = -EINVAL;
13179c66d156SVinicius Costa Gomes 			goto out;
13189c66d156SVinicius Costa Gomes 		}
13196dc25401SEric Dumazet 		/* This pairs with READ_ONCE() in taprio_mono_to_any */
13206dc25401SEric Dumazet 		WRITE_ONCE(q->tk_offset, tk_offset);
13219c66d156SVinicius Costa Gomes 
13229c66d156SVinicius Costa Gomes 		q->clockid = clockid;
13239c66d156SVinicius Costa Gomes 	} else {
13249c66d156SVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
13259c66d156SVinicius Costa Gomes 		goto out;
13269c66d156SVinicius Costa Gomes 	}
1327a954380aSVinicius Costa Gomes 
1328a954380aSVinicius Costa Gomes 	/* Everything went ok, return success. */
1329a954380aSVinicius Costa Gomes 	err = 0;
1330a954380aSVinicius Costa Gomes 
13319c66d156SVinicius Costa Gomes out:
13329c66d156SVinicius Costa Gomes 	return err;
13339c66d156SVinicius Costa Gomes }
13349c66d156SVinicius Costa Gomes 
1335a54fc09eSVladimir Oltean static int taprio_parse_tc_entry(struct Qdisc *sch,
1336a54fc09eSVladimir Oltean 				 struct nlattr *opt,
1337a54fc09eSVladimir Oltean 				 u32 max_sdu[TC_QOPT_MAX_QUEUE],
1338a54fc09eSVladimir Oltean 				 unsigned long *seen_tcs,
1339a54fc09eSVladimir Oltean 				 struct netlink_ext_ack *extack)
1340a54fc09eSVladimir Oltean {
1341a54fc09eSVladimir Oltean 	struct nlattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { };
1342a54fc09eSVladimir Oltean 	struct net_device *dev = qdisc_dev(sch);
1343a54fc09eSVladimir Oltean 	u32 val = 0;
1344a54fc09eSVladimir Oltean 	int err, tc;
1345a54fc09eSVladimir Oltean 
1346a54fc09eSVladimir Oltean 	err = nla_parse_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, opt,
1347a54fc09eSVladimir Oltean 			       taprio_tc_policy, extack);
1348a54fc09eSVladimir Oltean 	if (err < 0)
1349a54fc09eSVladimir Oltean 		return err;
1350a54fc09eSVladimir Oltean 
1351a54fc09eSVladimir Oltean 	if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) {
1352a54fc09eSVladimir Oltean 		NL_SET_ERR_MSG_MOD(extack, "TC entry index missing");
1353a54fc09eSVladimir Oltean 		return -EINVAL;
1354a54fc09eSVladimir Oltean 	}
1355a54fc09eSVladimir Oltean 
1356a54fc09eSVladimir Oltean 	tc = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]);
1357a54fc09eSVladimir Oltean 	if (tc >= TC_QOPT_MAX_QUEUE) {
1358a54fc09eSVladimir Oltean 		NL_SET_ERR_MSG_MOD(extack, "TC entry index out of range");
1359a54fc09eSVladimir Oltean 		return -ERANGE;
1360a54fc09eSVladimir Oltean 	}
1361a54fc09eSVladimir Oltean 
1362a54fc09eSVladimir Oltean 	if (*seen_tcs & BIT(tc)) {
1363a54fc09eSVladimir Oltean 		NL_SET_ERR_MSG_MOD(extack, "Duplicate TC entry");
1364a54fc09eSVladimir Oltean 		return -EINVAL;
1365a54fc09eSVladimir Oltean 	}
1366a54fc09eSVladimir Oltean 
1367a54fc09eSVladimir Oltean 	*seen_tcs |= BIT(tc);
1368a54fc09eSVladimir Oltean 
1369a54fc09eSVladimir Oltean 	if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU])
1370a54fc09eSVladimir Oltean 		val = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]);
1371a54fc09eSVladimir Oltean 
1372a54fc09eSVladimir Oltean 	if (val > dev->max_mtu) {
1373a54fc09eSVladimir Oltean 		NL_SET_ERR_MSG_MOD(extack, "TC max SDU exceeds device max MTU");
1374a54fc09eSVladimir Oltean 		return -ERANGE;
1375a54fc09eSVladimir Oltean 	}
1376a54fc09eSVladimir Oltean 
1377a54fc09eSVladimir Oltean 	max_sdu[tc] = val;
1378a54fc09eSVladimir Oltean 
1379a54fc09eSVladimir Oltean 	return 0;
1380a54fc09eSVladimir Oltean }
1381a54fc09eSVladimir Oltean 
1382a54fc09eSVladimir Oltean static int taprio_parse_tc_entries(struct Qdisc *sch,
1383a54fc09eSVladimir Oltean 				   struct nlattr *opt,
1384a54fc09eSVladimir Oltean 				   struct netlink_ext_ack *extack)
1385a54fc09eSVladimir Oltean {
1386a54fc09eSVladimir Oltean 	struct taprio_sched *q = qdisc_priv(sch);
1387a54fc09eSVladimir Oltean 	struct net_device *dev = qdisc_dev(sch);
1388a54fc09eSVladimir Oltean 	u32 max_sdu[TC_QOPT_MAX_QUEUE];
1389a54fc09eSVladimir Oltean 	unsigned long seen_tcs = 0;
1390a54fc09eSVladimir Oltean 	struct nlattr *n;
1391a54fc09eSVladimir Oltean 	int tc, rem;
1392a54fc09eSVladimir Oltean 	int err = 0;
1393a54fc09eSVladimir Oltean 
1394a54fc09eSVladimir Oltean 	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
1395a54fc09eSVladimir Oltean 		max_sdu[tc] = q->max_sdu[tc];
1396a54fc09eSVladimir Oltean 
1397a54fc09eSVladimir Oltean 	nla_for_each_nested(n, opt, rem) {
1398a54fc09eSVladimir Oltean 		if (nla_type(n) != TCA_TAPRIO_ATTR_TC_ENTRY)
1399a54fc09eSVladimir Oltean 			continue;
1400a54fc09eSVladimir Oltean 
1401a54fc09eSVladimir Oltean 		err = taprio_parse_tc_entry(sch, n, max_sdu, &seen_tcs, extack);
1402a54fc09eSVladimir Oltean 		if (err)
1403a54fc09eSVladimir Oltean 			goto out;
1404a54fc09eSVladimir Oltean 	}
1405a54fc09eSVladimir Oltean 
1406a54fc09eSVladimir Oltean 	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
1407a54fc09eSVladimir Oltean 		q->max_sdu[tc] = max_sdu[tc];
1408a54fc09eSVladimir Oltean 		if (max_sdu[tc])
1409a54fc09eSVladimir Oltean 			q->max_frm_len[tc] = max_sdu[tc] + dev->hard_header_len;
1410a54fc09eSVladimir Oltean 		else
1411a54fc09eSVladimir Oltean 			q->max_frm_len[tc] = U32_MAX; /* never oversized */
1412a54fc09eSVladimir Oltean 	}
1413a54fc09eSVladimir Oltean 
1414a54fc09eSVladimir Oltean out:
1415a54fc09eSVladimir Oltean 	return err;
1416a54fc09eSVladimir Oltean }
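
/* Worked example (illustrative): on Ethernet (hard_header_len = 14), a
 * parsed max_sdu[tc] of 1500 results in max_frm_len[tc] = 1514 bytes on
 * the wire, while an unset max_sdu[tc] of 0 leaves max_frm_len[tc] at
 * U32_MAX so that no frame is ever considered oversized for that TC. With
 * iproute2, these per-TC limits are expected to arrive via the taprio
 * "max-sdu" option.
 */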
1417a54fc09eSVladimir Oltean 
1418b5a0faa3SIvan Khoronzhuk static int taprio_mqprio_cmp(const struct net_device *dev,
1419b5a0faa3SIvan Khoronzhuk 			     const struct tc_mqprio_qopt *mqprio)
1420b5a0faa3SIvan Khoronzhuk {
1421b5a0faa3SIvan Khoronzhuk 	int i;
1422b5a0faa3SIvan Khoronzhuk 
1423b5a0faa3SIvan Khoronzhuk 	if (!mqprio || mqprio->num_tc != dev->num_tc)
1424b5a0faa3SIvan Khoronzhuk 		return -1;
1425b5a0faa3SIvan Khoronzhuk 
1426b5a0faa3SIvan Khoronzhuk 	for (i = 0; i < mqprio->num_tc; i++)
1427b5a0faa3SIvan Khoronzhuk 		if (dev->tc_to_txq[i].count != mqprio->count[i] ||
1428b5a0faa3SIvan Khoronzhuk 		    dev->tc_to_txq[i].offset != mqprio->offset[i])
1429b5a0faa3SIvan Khoronzhuk 			return -1;
1430b5a0faa3SIvan Khoronzhuk 
1431b5a0faa3SIvan Khoronzhuk 	for (i = 0; i <= TC_BITMASK; i++)
1432b5a0faa3SIvan Khoronzhuk 		if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
1433b5a0faa3SIvan Khoronzhuk 			return -1;
1434b5a0faa3SIvan Khoronzhuk 
1435b5a0faa3SIvan Khoronzhuk 	return 0;
1436b5a0faa3SIvan Khoronzhuk }
1437b5a0faa3SIvan Khoronzhuk 
1438a9d62274SVinicius Costa Gomes /* The semantics of the 'flags' argument in relation to 'change()'
1439a9d62274SVinicius Costa Gomes  * requests are interpreted following two rules (which are applied in
1440a9d62274SVinicius Costa Gomes  * this order): (1) an omitted 'flags' argument is interpreted as
1441a9d62274SVinicius Costa Gomes  * zero; (2) the 'flags' of a "running" taprio instance cannot be
1442a9d62274SVinicius Costa Gomes  * changed.
1443a9d62274SVinicius Costa Gomes  */
1444a9d62274SVinicius Costa Gomes static int taprio_new_flags(const struct nlattr *attr, u32 old,
1445a9d62274SVinicius Costa Gomes 			    struct netlink_ext_ack *extack)
1446a9d62274SVinicius Costa Gomes {
1447a9d62274SVinicius Costa Gomes 	u32 new = 0;
1448a9d62274SVinicius Costa Gomes 
1449a9d62274SVinicius Costa Gomes 	if (attr)
1450a9d62274SVinicius Costa Gomes 		new = nla_get_u32(attr);
1451a9d62274SVinicius Costa Gomes 
1452a9d62274SVinicius Costa Gomes 	if (old != TAPRIO_FLAGS_INVALID && old != new) {
1453a9d62274SVinicius Costa Gomes 		NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
1454a9d62274SVinicius Costa Gomes 		return -EOPNOTSUPP;
1455a9d62274SVinicius Costa Gomes 	}
1456a9d62274SVinicius Costa Gomes 
1457a9d62274SVinicius Costa Gomes 	if (!taprio_flags_valid(new)) {
1458a9d62274SVinicius Costa Gomes 		NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
1459a9d62274SVinicius Costa Gomes 		return -EINVAL;
1460a9d62274SVinicius Costa Gomes 	}
1461a9d62274SVinicius Costa Gomes 
1462a9d62274SVinicius Costa Gomes 	return new;
1463a9d62274SVinicius Costa Gomes }
1464a9d62274SVinicius Costa Gomes 
14655a781ccbSVinicius Costa Gomes static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
14665a781ccbSVinicius Costa Gomes 			 struct netlink_ext_ack *extack)
14675a781ccbSVinicius Costa Gomes {
14685a781ccbSVinicius Costa Gomes 	struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
1469a3d43c0dSVinicius Costa Gomes 	struct sched_gate_list *oper, *admin, *new_admin;
14705a781ccbSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
14715a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
14725a781ccbSVinicius Costa Gomes 	struct tc_mqprio_qopt *mqprio = NULL;
1473a3d43c0dSVinicius Costa Gomes 	unsigned long flags;
14745a781ccbSVinicius Costa Gomes 	ktime_t start;
14759c66d156SVinicius Costa Gomes 	int i, err;
14765a781ccbSVinicius Costa Gomes 
14778cb08174SJohannes Berg 	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
14785a781ccbSVinicius Costa Gomes 					  taprio_policy, extack);
14795a781ccbSVinicius Costa Gomes 	if (err < 0)
14805a781ccbSVinicius Costa Gomes 		return err;
14815a781ccbSVinicius Costa Gomes 
14825a781ccbSVinicius Costa Gomes 	if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
14835a781ccbSVinicius Costa Gomes 		mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
14845a781ccbSVinicius Costa Gomes 
1485a9d62274SVinicius Costa Gomes 	err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
1486a9d62274SVinicius Costa Gomes 			       q->flags, extack);
1487a9d62274SVinicius Costa Gomes 	if (err < 0)
1488a9d62274SVinicius Costa Gomes 		return err;
14894cfd5779SVedang Patel 
1490a9d62274SVinicius Costa Gomes 	q->flags = err;
14914cfd5779SVedang Patel 
1492a9d62274SVinicius Costa Gomes 	err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
14935a781ccbSVinicius Costa Gomes 	if (err < 0)
14945a781ccbSVinicius Costa Gomes 		return err;
14955a781ccbSVinicius Costa Gomes 
1496a54fc09eSVladimir Oltean 	err = taprio_parse_tc_entries(sch, opt, extack);
1497a54fc09eSVladimir Oltean 	if (err)
1498a54fc09eSVladimir Oltean 		return err;
1499a54fc09eSVladimir Oltean 
1500a3d43c0dSVinicius Costa Gomes 	new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
1501a3d43c0dSVinicius Costa Gomes 	if (!new_admin) {
1502a3d43c0dSVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
1503a3d43c0dSVinicius Costa Gomes 		return -ENOMEM;
1504a3d43c0dSVinicius Costa Gomes 	}
1505a3d43c0dSVinicius Costa Gomes 	INIT_LIST_HEAD(&new_admin->entries);
15065a781ccbSVinicius Costa Gomes 
150718cdd2f0SVladimir Oltean 	oper = rtnl_dereference(q->oper_sched);
150818cdd2f0SVladimir Oltean 	admin = rtnl_dereference(q->admin_sched);
15095a781ccbSVinicius Costa Gomes 
1510b5a0faa3SIvan Khoronzhuk 	/* no changes - no new mqprio settings */
1511b5a0faa3SIvan Khoronzhuk 	if (!taprio_mqprio_cmp(dev, mqprio))
1512b5a0faa3SIvan Khoronzhuk 		mqprio = NULL;
1513b5a0faa3SIvan Khoronzhuk 
1514a3d43c0dSVinicius Costa Gomes 	if (mqprio && (oper || admin)) {
1515a3d43c0dSVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
1516a3d43c0dSVinicius Costa Gomes 		err = -ENOTSUPP;
1517a3d43c0dSVinicius Costa Gomes 		goto free_sched;
15185a781ccbSVinicius Costa Gomes 	}
15195a781ccbSVinicius Costa Gomes 
1520b5b73b26SVinicius Costa Gomes 	err = parse_taprio_schedule(q, tb, new_admin, extack);
1521a3d43c0dSVinicius Costa Gomes 	if (err < 0)
1522a3d43c0dSVinicius Costa Gomes 		goto free_sched;
15235a781ccbSVinicius Costa Gomes 
1524a3d43c0dSVinicius Costa Gomes 	if (new_admin->num_entries == 0) {
1525a3d43c0dSVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
1526a3d43c0dSVinicius Costa Gomes 		err = -EINVAL;
1527a3d43c0dSVinicius Costa Gomes 		goto free_sched;
1528a3d43c0dSVinicius Costa Gomes 	}
15295a781ccbSVinicius Costa Gomes 
15309c66d156SVinicius Costa Gomes 	err = taprio_parse_clockid(sch, tb, extack);
15319c66d156SVinicius Costa Gomes 	if (err < 0)
1532a3d43c0dSVinicius Costa Gomes 		goto free_sched;
1533a3d43c0dSVinicius Costa Gomes 
1534a3d43c0dSVinicius Costa Gomes 	taprio_set_picos_per_byte(dev, q);
1535a3d43c0dSVinicius Costa Gomes 
15365652e63dSVinicius Costa Gomes 	if (mqprio) {
1537efe487fcSHaimin Zhang 		err = netdev_set_num_tc(dev, mqprio->num_tc);
1538efe487fcSHaimin Zhang 		if (err)
1539efe487fcSHaimin Zhang 			goto free_sched;
15405652e63dSVinicius Costa Gomes 		for (i = 0; i < mqprio->num_tc; i++)
15415652e63dSVinicius Costa Gomes 			netdev_set_tc_queue(dev, i,
15425652e63dSVinicius Costa Gomes 					    mqprio->count[i],
15435652e63dSVinicius Costa Gomes 					    mqprio->offset[i]);
15445652e63dSVinicius Costa Gomes 
15455652e63dSVinicius Costa Gomes 		/* Always use supplied priority mappings */
15465652e63dSVinicius Costa Gomes 		for (i = 0; i <= TC_BITMASK; i++)
15475652e63dSVinicius Costa Gomes 			netdev_set_prio_tc_map(dev, i,
15485652e63dSVinicius Costa Gomes 					       mqprio->prio_tc_map[i]);
15495652e63dSVinicius Costa Gomes 	}
15505652e63dSVinicius Costa Gomes 
1551a9d62274SVinicius Costa Gomes 	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
155209e31cf0SVinicius Costa Gomes 		err = taprio_enable_offload(dev, q, new_admin, extack);
15539c66d156SVinicius Costa Gomes 	else
15549c66d156SVinicius Costa Gomes 		err = taprio_disable_offload(dev, q, extack);
15559c66d156SVinicius Costa Gomes 	if (err)
15569c66d156SVinicius Costa Gomes 		goto free_sched;
15579c66d156SVinicius Costa Gomes 
1558a3d43c0dSVinicius Costa Gomes 	/* Protects against enqueue()/dequeue() */
1559a3d43c0dSVinicius Costa Gomes 	spin_lock_bh(qdisc_lock(sch));
1560a3d43c0dSVinicius Costa Gomes 
15614cfd5779SVedang Patel 	if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
15624cfd5779SVedang Patel 		if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
15634cfd5779SVedang Patel 			NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
15644cfd5779SVedang Patel 			err = -EINVAL;
15654cfd5779SVedang Patel 			goto unlock;
15664cfd5779SVedang Patel 		}
15674cfd5779SVedang Patel 
1568a5b64700SVedang Patel 		q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
15694cfd5779SVedang Patel 	}
15704cfd5779SVedang Patel 
1571a9d62274SVinicius Costa Gomes 	if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
1572a9d62274SVinicius Costa Gomes 	    !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
15734cfd5779SVedang Patel 	    !hrtimer_active(&q->advance_timer)) {
1574a3d43c0dSVinicius Costa Gomes 		hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
1575a3d43c0dSVinicius Costa Gomes 		q->advance_timer.function = advance_sched;
15765a781ccbSVinicius Costa Gomes 	}
15775a781ccbSVinicius Costa Gomes 
1578a3d43c0dSVinicius Costa Gomes 	err = taprio_get_start_time(sch, new_admin, &start);
1579a3d43c0dSVinicius Costa Gomes 	if (err < 0) {
1580a3d43c0dSVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
1581a3d43c0dSVinicius Costa Gomes 		goto unlock;
1582a3d43c0dSVinicius Costa Gomes 	}
15835a781ccbSVinicius Costa Gomes 
15844cfd5779SVedang Patel 	setup_txtime(q, new_admin, start);
15854cfd5779SVedang Patel 
1586bfabd41dSVinicius Costa Gomes 	if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
15874cfd5779SVedang Patel 		if (!oper) {
15884cfd5779SVedang Patel 			rcu_assign_pointer(q->oper_sched, new_admin);
15894cfd5779SVedang Patel 			err = 0;
15904cfd5779SVedang Patel 			new_admin = NULL;
15914cfd5779SVedang Patel 			goto unlock;
15924cfd5779SVedang Patel 		}
15934cfd5779SVedang Patel 
15944cfd5779SVedang Patel 		rcu_assign_pointer(q->admin_sched, new_admin);
15954cfd5779SVedang Patel 		if (admin)
15964cfd5779SVedang Patel 			call_rcu(&admin->rcu, taprio_free_sched_cb);
15974cfd5779SVedang Patel 	} else {
1598a3d43c0dSVinicius Costa Gomes 		setup_first_close_time(q, new_admin, start);
1599a3d43c0dSVinicius Costa Gomes 
1600a3d43c0dSVinicius Costa Gomes 		/* Protects against advance_sched() */
1601a3d43c0dSVinicius Costa Gomes 		spin_lock_irqsave(&q->current_entry_lock, flags);
1602a3d43c0dSVinicius Costa Gomes 
1603a3d43c0dSVinicius Costa Gomes 		taprio_start_sched(sch, start, new_admin);
1604a3d43c0dSVinicius Costa Gomes 
1605a3d43c0dSVinicius Costa Gomes 		rcu_assign_pointer(q->admin_sched, new_admin);
1606a3d43c0dSVinicius Costa Gomes 		if (admin)
1607a3d43c0dSVinicius Costa Gomes 			call_rcu(&admin->rcu, taprio_free_sched_cb);
1608a3d43c0dSVinicius Costa Gomes 
1609a3d43c0dSVinicius Costa Gomes 		spin_unlock_irqrestore(&q->current_entry_lock, flags);
16100763b3e8SIvan Khoronzhuk 
1611a9d62274SVinicius Costa Gomes 		if (FULL_OFFLOAD_IS_ENABLED(q->flags))
16120763b3e8SIvan Khoronzhuk 			taprio_offload_config_changed(q);
16134cfd5779SVedang Patel 	}
1614a3d43c0dSVinicius Costa Gomes 
16154cfd5779SVedang Patel 	new_admin = NULL;
1616a3d43c0dSVinicius Costa Gomes 	err = 0;
1617a3d43c0dSVinicius Costa Gomes 
1618a3d43c0dSVinicius Costa Gomes unlock:
1619a3d43c0dSVinicius Costa Gomes 	spin_unlock_bh(qdisc_lock(sch));
1620a3d43c0dSVinicius Costa Gomes 
1621a3d43c0dSVinicius Costa Gomes free_sched:
162251650d33SIvan Khoronzhuk 	if (new_admin)
162351650d33SIvan Khoronzhuk 		call_rcu(&new_admin->rcu, taprio_free_sched_cb);
1624a3d43c0dSVinicius Costa Gomes 
1625a3d43c0dSVinicius Costa Gomes 	return err;
16265a781ccbSVinicius Costa Gomes }
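
/* Example (illustrative values) of an iproute2 command that exercises the
 * change() path above, building a three-entry schedule on the TAI clock:
 *
 *	tc qdisc replace dev eth0 parent root handle 100 taprio \
 *		num_tc 3 \
 *		map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *		queues 1@0 1@1 2@2 \
 *		base-time 1528743495910289987 \
 *		sched-entry S 01 300000 \
 *		sched-entry S 02 300000 \
 *		sched-entry S 04 400000 \
 *		clockid CLOCK_TAI
 */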
16275a781ccbSVinicius Costa Gomes 
162844d4775cSDavide Caratti static void taprio_reset(struct Qdisc *sch)
162944d4775cSDavide Caratti {
163044d4775cSDavide Caratti 	struct taprio_sched *q = qdisc_priv(sch);
163144d4775cSDavide Caratti 	struct net_device *dev = qdisc_dev(sch);
163244d4775cSDavide Caratti 	int i;
163344d4775cSDavide Caratti 
163444d4775cSDavide Caratti 	hrtimer_cancel(&q->advance_timer);
16353a415d59SEric Dumazet 
163644d4775cSDavide Caratti 	if (q->qdiscs) {
1637698285daSDavide Caratti 		for (i = 0; i < dev->num_tx_queues; i++)
1638698285daSDavide Caratti 			if (q->qdiscs[i])
163944d4775cSDavide Caratti 				qdisc_reset(q->qdiscs[i]);
164044d4775cSDavide Caratti 	}
164144d4775cSDavide Caratti }
164244d4775cSDavide Caratti 
16435a781ccbSVinicius Costa Gomes static void taprio_destroy(struct Qdisc *sch)
16445a781ccbSVinicius Costa Gomes {
16455a781ccbSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
16465a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
16479af23657SVladimir Oltean 	struct sched_gate_list *oper, *admin;
16485a781ccbSVinicius Costa Gomes 	unsigned int i;
16495a781ccbSVinicius Costa Gomes 
16507b9eba7bSLeandro Dorileo 	list_del(&q->taprio_list);
16517b9eba7bSLeandro Dorileo 
1652a56d447fSEric Dumazet 	/* Note that taprio_reset() might not be called if an error
1653a56d447fSEric Dumazet 	 * happens in qdisc_create(), after taprio_init() has been called.
1654a56d447fSEric Dumazet 	 */
1655a56d447fSEric Dumazet 	hrtimer_cancel(&q->advance_timer);
16563a415d59SEric Dumazet 	qdisc_synchronize(sch);
16575a781ccbSVinicius Costa Gomes 
16589c66d156SVinicius Costa Gomes 	taprio_disable_offload(dev, q, NULL);
16599c66d156SVinicius Costa Gomes 
16605a781ccbSVinicius Costa Gomes 	if (q->qdiscs) {
1661698285daSDavide Caratti 		for (i = 0; i < dev->num_tx_queues; i++)
16625a781ccbSVinicius Costa Gomes 			qdisc_put(q->qdiscs[i]);
16635a781ccbSVinicius Costa Gomes 
16645a781ccbSVinicius Costa Gomes 		kfree(q->qdiscs);
16655a781ccbSVinicius Costa Gomes 	}
16665a781ccbSVinicius Costa Gomes 	q->qdiscs = NULL;
16675a781ccbSVinicius Costa Gomes 
16687c16680aSVinicius Costa Gomes 	netdev_reset_tc(dev);
16695a781ccbSVinicius Costa Gomes 
16709af23657SVladimir Oltean 	oper = rtnl_dereference(q->oper_sched);
16719af23657SVladimir Oltean 	admin = rtnl_dereference(q->admin_sched);
1672a3d43c0dSVinicius Costa Gomes 
16739af23657SVladimir Oltean 	if (oper)
16749af23657SVladimir Oltean 		call_rcu(&oper->rcu, taprio_free_sched_cb);
16759af23657SVladimir Oltean 
16769af23657SVladimir Oltean 	if (admin)
16779af23657SVladimir Oltean 		call_rcu(&admin->rcu, taprio_free_sched_cb);
16785a781ccbSVinicius Costa Gomes }
16795a781ccbSVinicius Costa Gomes 
16805a781ccbSVinicius Costa Gomes static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
16815a781ccbSVinicius Costa Gomes 		       struct netlink_ext_ack *extack)
16825a781ccbSVinicius Costa Gomes {
16835a781ccbSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
16845a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
1685a3d43c0dSVinicius Costa Gomes 	int i;
16865a781ccbSVinicius Costa Gomes 
16875a781ccbSVinicius Costa Gomes 	spin_lock_init(&q->current_entry_lock);
16885a781ccbSVinicius Costa Gomes 
16895a781ccbSVinicius Costa Gomes 	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
1690a3d43c0dSVinicius Costa Gomes 	q->advance_timer.function = advance_sched;
16915a781ccbSVinicius Costa Gomes 
16925a781ccbSVinicius Costa Gomes 	q->root = sch;
16935a781ccbSVinicius Costa Gomes 
16945a781ccbSVinicius Costa Gomes 	/* We only support static clockids. Use an invalid value as default
16955a781ccbSVinicius Costa Gomes 	 * and get the valid one on taprio_change().
16965a781ccbSVinicius Costa Gomes 	 */
16975a781ccbSVinicius Costa Gomes 	q->clockid = -1;
1698a9d62274SVinicius Costa Gomes 	q->flags = TAPRIO_FLAGS_INVALID;
16995a781ccbSVinicius Costa Gomes 
1700efb55222SVladimir Oltean 	list_add(&q->taprio_list, &taprio_list);
1701efb55222SVladimir Oltean 
1702026de64dSVladimir Oltean 	if (sch->parent != TC_H_ROOT) {
1703026de64dSVladimir Oltean 		NL_SET_ERR_MSG_MOD(extack, "Can only be attached as root qdisc");
17045a781ccbSVinicius Costa Gomes 		return -EOPNOTSUPP;
1705026de64dSVladimir Oltean 	}
17065a781ccbSVinicius Costa Gomes 
1707026de64dSVladimir Oltean 	if (!netif_is_multiqueue(dev)) {
1708026de64dSVladimir Oltean 		NL_SET_ERR_MSG_MOD(extack, "Multi-queue device is required");
17095a781ccbSVinicius Costa Gomes 		return -EOPNOTSUPP;
1710026de64dSVladimir Oltean 	}
17115a781ccbSVinicius Costa Gomes 
17125a781ccbSVinicius Costa Gomes 	/* pre-allocate qdisc array, attachment can't fail */
17135a781ccbSVinicius Costa Gomes 	q->qdiscs = kcalloc(dev->num_tx_queues,
17145a781ccbSVinicius Costa Gomes 			    sizeof(q->qdiscs[0]),
17155a781ccbSVinicius Costa Gomes 			    GFP_KERNEL);
17165a781ccbSVinicius Costa Gomes 
17175a781ccbSVinicius Costa Gomes 	if (!q->qdiscs)
17185a781ccbSVinicius Costa Gomes 		return -ENOMEM;
17195a781ccbSVinicius Costa Gomes 
17205a781ccbSVinicius Costa Gomes 	if (!opt)
17215a781ccbSVinicius Costa Gomes 		return -EINVAL;
17225a781ccbSVinicius Costa Gomes 
1723a3d43c0dSVinicius Costa Gomes 	for (i = 0; i < dev->num_tx_queues; i++) {
1724a3d43c0dSVinicius Costa Gomes 		struct netdev_queue *dev_queue;
1725a3d43c0dSVinicius Costa Gomes 		struct Qdisc *qdisc;
1726a3d43c0dSVinicius Costa Gomes 
1727a3d43c0dSVinicius Costa Gomes 		dev_queue = netdev_get_tx_queue(dev, i);
1728a3d43c0dSVinicius Costa Gomes 		qdisc = qdisc_create_dflt(dev_queue,
1729a3d43c0dSVinicius Costa Gomes 					  &pfifo_qdisc_ops,
1730a3d43c0dSVinicius Costa Gomes 					  TC_H_MAKE(TC_H_MAJ(sch->handle),
1731a3d43c0dSVinicius Costa Gomes 						    TC_H_MIN(i + 1)),
1732a3d43c0dSVinicius Costa Gomes 					  extack);
1733a3d43c0dSVinicius Costa Gomes 		if (!qdisc)
1734a3d43c0dSVinicius Costa Gomes 			return -ENOMEM;
1735a3d43c0dSVinicius Costa Gomes 
1736a3d43c0dSVinicius Costa Gomes 		if (i < dev->real_num_tx_queues)
1737a3d43c0dSVinicius Costa Gomes 			qdisc_hash_add(qdisc, false);
1738a3d43c0dSVinicius Costa Gomes 
1739a3d43c0dSVinicius Costa Gomes 		q->qdiscs[i] = qdisc;
1740a3d43c0dSVinicius Costa Gomes 	}
1741a3d43c0dSVinicius Costa Gomes 
17425a781ccbSVinicius Costa Gomes 	return taprio_change(sch, opt, extack);
17435a781ccbSVinicius Costa Gomes }
17445a781ccbSVinicius Costa Gomes 
174513511704SYannick Vignon static void taprio_attach(struct Qdisc *sch)
174613511704SYannick Vignon {
174713511704SYannick Vignon 	struct taprio_sched *q = qdisc_priv(sch);
174813511704SYannick Vignon 	struct net_device *dev = qdisc_dev(sch);
174913511704SYannick Vignon 	unsigned int ntx;
175013511704SYannick Vignon 
175113511704SYannick Vignon 	/* Attach underlying qdisc */
175213511704SYannick Vignon 	/* Attach the underlying qdiscs */
175313511704SYannick Vignon 		struct Qdisc *qdisc = q->qdiscs[ntx];
175413511704SYannick Vignon 		struct Qdisc *old;
175513511704SYannick Vignon 
175613511704SYannick Vignon 		if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
175713511704SYannick Vignon 			qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
175813511704SYannick Vignon 			old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
175913511704SYannick Vignon 		} else {
176013511704SYannick Vignon 			old = dev_graft_qdisc(qdisc->dev_queue, sch);
176113511704SYannick Vignon 			qdisc_refcount_inc(sch);
176213511704SYannick Vignon 		}
176313511704SYannick Vignon 		if (old)
176413511704SYannick Vignon 			qdisc_put(old);
176513511704SYannick Vignon 	}
176613511704SYannick Vignon 
176713511704SYannick Vignon 	/* access to the child qdiscs is not needed in offload mode */
176813511704SYannick Vignon 	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
176913511704SYannick Vignon 		kfree(q->qdiscs);
177013511704SYannick Vignon 		q->qdiscs = NULL;
177113511704SYannick Vignon 	}
177213511704SYannick Vignon }
177313511704SYannick Vignon 
17745a781ccbSVinicius Costa Gomes static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
17755a781ccbSVinicius Costa Gomes 					     unsigned long cl)
17765a781ccbSVinicius Costa Gomes {
17775a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
17785a781ccbSVinicius Costa Gomes 	unsigned long ntx = cl - 1;
17795a781ccbSVinicius Costa Gomes 
17805a781ccbSVinicius Costa Gomes 	if (ntx >= dev->num_tx_queues)
17815a781ccbSVinicius Costa Gomes 		return NULL;
17825a781ccbSVinicius Costa Gomes 
17835a781ccbSVinicius Costa Gomes 	return netdev_get_tx_queue(dev, ntx);
17845a781ccbSVinicius Costa Gomes }
17855a781ccbSVinicius Costa Gomes 
17865a781ccbSVinicius Costa Gomes static int taprio_graft(struct Qdisc *sch, unsigned long cl,
17875a781ccbSVinicius Costa Gomes 			struct Qdisc *new, struct Qdisc **old,
17885a781ccbSVinicius Costa Gomes 			struct netlink_ext_ack *extack)
17895a781ccbSVinicius Costa Gomes {
17905a781ccbSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
17915a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
17925a781ccbSVinicius Costa Gomes 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
17935a781ccbSVinicius Costa Gomes 
17945a781ccbSVinicius Costa Gomes 	if (!dev_queue)
17955a781ccbSVinicius Costa Gomes 		return -EINVAL;
17965a781ccbSVinicius Costa Gomes 
17975a781ccbSVinicius Costa Gomes 	if (dev->flags & IFF_UP)
17985a781ccbSVinicius Costa Gomes 		dev_deactivate(dev);
17995a781ccbSVinicius Costa Gomes 
180013511704SYannick Vignon 	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
180113511704SYannick Vignon 		*old = dev_graft_qdisc(dev_queue, new);
180213511704SYannick Vignon 	} else {
18035a781ccbSVinicius Costa Gomes 		*old = q->qdiscs[cl - 1];
18045a781ccbSVinicius Costa Gomes 		q->qdiscs[cl - 1] = new;
180513511704SYannick Vignon 	}
18065a781ccbSVinicius Costa Gomes 
18075a781ccbSVinicius Costa Gomes 	if (new)
18085a781ccbSVinicius Costa Gomes 		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
18095a781ccbSVinicius Costa Gomes 
18105a781ccbSVinicius Costa Gomes 	if (dev->flags & IFF_UP)
18115a781ccbSVinicius Costa Gomes 		dev_activate(dev);
18125a781ccbSVinicius Costa Gomes 
18135a781ccbSVinicius Costa Gomes 	return 0;
18145a781ccbSVinicius Costa Gomes }
18155a781ccbSVinicius Costa Gomes 
18165a781ccbSVinicius Costa Gomes static int dump_entry(struct sk_buff *msg,
18175a781ccbSVinicius Costa Gomes 		      const struct sched_entry *entry)
18185a781ccbSVinicius Costa Gomes {
18195a781ccbSVinicius Costa Gomes 	struct nlattr *item;
18205a781ccbSVinicius Costa Gomes 
1821ae0be8deSMichal Kubecek 	item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
18225a781ccbSVinicius Costa Gomes 	if (!item)
18235a781ccbSVinicius Costa Gomes 		return -ENOSPC;
18245a781ccbSVinicius Costa Gomes 
18255a781ccbSVinicius Costa Gomes 	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
18265a781ccbSVinicius Costa Gomes 		goto nla_put_failure;
18275a781ccbSVinicius Costa Gomes 
18285a781ccbSVinicius Costa Gomes 	if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
18295a781ccbSVinicius Costa Gomes 		goto nla_put_failure;
18305a781ccbSVinicius Costa Gomes 
18315a781ccbSVinicius Costa Gomes 	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
18325a781ccbSVinicius Costa Gomes 			entry->gate_mask))
18335a781ccbSVinicius Costa Gomes 		goto nla_put_failure;
18345a781ccbSVinicius Costa Gomes 
18355a781ccbSVinicius Costa Gomes 	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
18365a781ccbSVinicius Costa Gomes 			entry->interval))
18375a781ccbSVinicius Costa Gomes 		goto nla_put_failure;
18385a781ccbSVinicius Costa Gomes 
18395a781ccbSVinicius Costa Gomes 	return nla_nest_end(msg, item);
18405a781ccbSVinicius Costa Gomes 
18415a781ccbSVinicius Costa Gomes nla_put_failure:
18425a781ccbSVinicius Costa Gomes 	nla_nest_cancel(msg, item);
18435a781ccbSVinicius Costa Gomes 	return -1;
18445a781ccbSVinicius Costa Gomes }
18455a781ccbSVinicius Costa Gomes 
1846a3d43c0dSVinicius Costa Gomes static int dump_schedule(struct sk_buff *msg,
1847a3d43c0dSVinicius Costa Gomes 			 const struct sched_gate_list *root)
1848a3d43c0dSVinicius Costa Gomes {
1849a3d43c0dSVinicius Costa Gomes 	struct nlattr *entry_list;
1850a3d43c0dSVinicius Costa Gomes 	struct sched_entry *entry;
1851a3d43c0dSVinicius Costa Gomes 
1852a3d43c0dSVinicius Costa Gomes 	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
1853a3d43c0dSVinicius Costa Gomes 			root->base_time, TCA_TAPRIO_PAD))
1854a3d43c0dSVinicius Costa Gomes 		return -1;
1855a3d43c0dSVinicius Costa Gomes 
18566ca6a665SVinicius Costa Gomes 	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
18576ca6a665SVinicius Costa Gomes 			root->cycle_time, TCA_TAPRIO_PAD))
18586ca6a665SVinicius Costa Gomes 		return -1;
18596ca6a665SVinicius Costa Gomes 
1860c25031e9SVinicius Costa Gomes 	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
1861c25031e9SVinicius Costa Gomes 			root->cycle_time_extension, TCA_TAPRIO_PAD))
1862c25031e9SVinicius Costa Gomes 		return -1;
1863c25031e9SVinicius Costa Gomes 
1864a3d43c0dSVinicius Costa Gomes 	entry_list = nla_nest_start_noflag(msg,
1865a3d43c0dSVinicius Costa Gomes 					   TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
1866a3d43c0dSVinicius Costa Gomes 	if (!entry_list)
1867a3d43c0dSVinicius Costa Gomes 		goto error_nest;
1868a3d43c0dSVinicius Costa Gomes 
1869a3d43c0dSVinicius Costa Gomes 	list_for_each_entry(entry, &root->entries, list) {
1870a3d43c0dSVinicius Costa Gomes 		if (dump_entry(msg, entry) < 0)
1871a3d43c0dSVinicius Costa Gomes 			goto error_nest;
1872a3d43c0dSVinicius Costa Gomes 	}
1873a3d43c0dSVinicius Costa Gomes 
1874a3d43c0dSVinicius Costa Gomes 	nla_nest_end(msg, entry_list);
1875a3d43c0dSVinicius Costa Gomes 	return 0;
1876a3d43c0dSVinicius Costa Gomes 
1877a3d43c0dSVinicius Costa Gomes error_nest:
1878a3d43c0dSVinicius Costa Gomes 	nla_nest_cancel(msg, entry_list);
1879a3d43c0dSVinicius Costa Gomes 	return -1;
1880a3d43c0dSVinicius Costa Gomes }
1881a3d43c0dSVinicius Costa Gomes 
1882a54fc09eSVladimir Oltean static int taprio_dump_tc_entries(struct taprio_sched *q, struct sk_buff *skb)
1883a54fc09eSVladimir Oltean {
1884a54fc09eSVladimir Oltean 	struct nlattr *n;
1885a54fc09eSVladimir Oltean 	int tc;
1886a54fc09eSVladimir Oltean 
1887a54fc09eSVladimir Oltean 	for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
1888a54fc09eSVladimir Oltean 		n = nla_nest_start(skb, TCA_TAPRIO_ATTR_TC_ENTRY);
1889a54fc09eSVladimir Oltean 		if (!n)
1890a54fc09eSVladimir Oltean 			return -EMSGSIZE;
1891a54fc09eSVladimir Oltean 
1892a54fc09eSVladimir Oltean 		if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_INDEX, tc))
1893a54fc09eSVladimir Oltean 			goto nla_put_failure;
1894a54fc09eSVladimir Oltean 
1895a54fc09eSVladimir Oltean 		if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
1896a54fc09eSVladimir Oltean 				q->max_sdu[tc]))
1897a54fc09eSVladimir Oltean 			goto nla_put_failure;
1898a54fc09eSVladimir Oltean 
1899a54fc09eSVladimir Oltean 		nla_nest_end(skb, n);
1900a54fc09eSVladimir Oltean 	}
1901a54fc09eSVladimir Oltean 
1902a54fc09eSVladimir Oltean 	return 0;
1903a54fc09eSVladimir Oltean 
1904a54fc09eSVladimir Oltean nla_put_failure:
1905a54fc09eSVladimir Oltean 	nla_nest_cancel(skb, n);
1906a54fc09eSVladimir Oltean 	return -EMSGSIZE;
1907a54fc09eSVladimir Oltean }
1908a54fc09eSVladimir Oltean 
19095a781ccbSVinicius Costa Gomes static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
19105a781ccbSVinicius Costa Gomes {
19115a781ccbSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
19125a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
1913a3d43c0dSVinicius Costa Gomes 	struct sched_gate_list *oper, *admin;
19145a781ccbSVinicius Costa Gomes 	struct tc_mqprio_qopt opt = { 0 };
1915a3d43c0dSVinicius Costa Gomes 	struct nlattr *nest, *sched_nest;
19165a781ccbSVinicius Costa Gomes 
191718cdd2f0SVladimir Oltean 	oper = rtnl_dereference(q->oper_sched);
191818cdd2f0SVladimir Oltean 	admin = rtnl_dereference(q->admin_sched);
1919a3d43c0dSVinicius Costa Gomes 
19209dd6ad67SVladimir Oltean 	mqprio_qopt_reconstruct(dev, &opt);
19215a781ccbSVinicius Costa Gomes 
1922ae0be8deSMichal Kubecek 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
19235a781ccbSVinicius Costa Gomes 	if (!nest)
1924a3d43c0dSVinicius Costa Gomes 		goto start_error;
19255a781ccbSVinicius Costa Gomes 
19265a781ccbSVinicius Costa Gomes 	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
19275a781ccbSVinicius Costa Gomes 		goto options_error;
19285a781ccbSVinicius Costa Gomes 
19299c66d156SVinicius Costa Gomes 	if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
19309c66d156SVinicius Costa Gomes 	    nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
19315a781ccbSVinicius Costa Gomes 		goto options_error;
19325a781ccbSVinicius Costa Gomes 
19334cfd5779SVedang Patel 	if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
19344cfd5779SVedang Patel 		goto options_error;
19354cfd5779SVedang Patel 
19364cfd5779SVedang Patel 	if (q->txtime_delay &&
1937a5b64700SVedang Patel 	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
19384cfd5779SVedang Patel 		goto options_error;
19394cfd5779SVedang Patel 
1940a54fc09eSVladimir Oltean 	if (taprio_dump_tc_entries(q, skb))
1941a54fc09eSVladimir Oltean 		goto options_error;
1942a54fc09eSVladimir Oltean 
1943a3d43c0dSVinicius Costa Gomes 	if (oper && dump_schedule(skb, oper))
19445a781ccbSVinicius Costa Gomes 		goto options_error;
19455a781ccbSVinicius Costa Gomes 
1946a3d43c0dSVinicius Costa Gomes 	if (!admin)
1947a3d43c0dSVinicius Costa Gomes 		goto done;
19485a781ccbSVinicius Costa Gomes 
1949a3d43c0dSVinicius Costa Gomes 	sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
1950e4acf427SColin Ian King 	if (!sched_nest)
1951e4acf427SColin Ian King 		goto options_error;
1952a3d43c0dSVinicius Costa Gomes 
1953a3d43c0dSVinicius Costa Gomes 	if (dump_schedule(skb, admin))
1954a3d43c0dSVinicius Costa Gomes 		goto admin_error;
1955a3d43c0dSVinicius Costa Gomes 
1956a3d43c0dSVinicius Costa Gomes 	nla_nest_end(skb, sched_nest);
1957a3d43c0dSVinicius Costa Gomes 
1958a3d43c0dSVinicius Costa Gomes done:
19595a781ccbSVinicius Costa Gomes 	return nla_nest_end(skb, nest);
19605a781ccbSVinicius Costa Gomes 
1961a3d43c0dSVinicius Costa Gomes admin_error:
1962a3d43c0dSVinicius Costa Gomes 	nla_nest_cancel(skb, sched_nest);
1963a3d43c0dSVinicius Costa Gomes 
19645a781ccbSVinicius Costa Gomes options_error:
19655a781ccbSVinicius Costa Gomes 	nla_nest_cancel(skb, nest);
1966a3d43c0dSVinicius Costa Gomes 
1967a3d43c0dSVinicius Costa Gomes start_error:
1968a3d43c0dSVinicius Costa Gomes 	return -ENOSPC;
19695a781ccbSVinicius Costa Gomes }
19705a781ccbSVinicius Costa Gomes 
19715a781ccbSVinicius Costa Gomes static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
19725a781ccbSVinicius Costa Gomes {
1973af7b29b1SVladimir Oltean 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
19745a781ccbSVinicius Costa Gomes 
1975af7b29b1SVladimir Oltean 	if (!dev_queue)
19765a781ccbSVinicius Costa Gomes 		return NULL;
19775a781ccbSVinicius Costa Gomes 
1978af7b29b1SVladimir Oltean 	return dev_queue->qdisc_sleeping;
19795a781ccbSVinicius Costa Gomes }
19805a781ccbSVinicius Costa Gomes 
19815a781ccbSVinicius Costa Gomes static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
19825a781ccbSVinicius Costa Gomes {
19835a781ccbSVinicius Costa Gomes 	unsigned int ntx = TC_H_MIN(classid);
19845a781ccbSVinicius Costa Gomes 
19855a781ccbSVinicius Costa Gomes 	if (!taprio_queue_get(sch, ntx))
19865a781ccbSVinicius Costa Gomes 		return 0;
19875a781ccbSVinicius Costa Gomes 	return ntx;
19885a781ccbSVinicius Costa Gomes }
19895a781ccbSVinicius Costa Gomes 
19905a781ccbSVinicius Costa Gomes static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
19915a781ccbSVinicius Costa Gomes 			     struct sk_buff *skb, struct tcmsg *tcm)
19925a781ccbSVinicius Costa Gomes {
19935a781ccbSVinicius Costa Gomes 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
19945a781ccbSVinicius Costa Gomes 
19955a781ccbSVinicius Costa Gomes 	tcm->tcm_parent = TC_H_ROOT;
19965a781ccbSVinicius Costa Gomes 	tcm->tcm_handle |= TC_H_MIN(cl);
19975a781ccbSVinicius Costa Gomes 	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
19985a781ccbSVinicius Costa Gomes 
19995a781ccbSVinicius Costa Gomes 	return 0;
20005a781ccbSVinicius Costa Gomes }
20015a781ccbSVinicius Costa Gomes 
20025a781ccbSVinicius Costa Gomes static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
20035a781ccbSVinicius Costa Gomes 				   struct gnet_dump *d)
20045a781ccbSVinicius Costa Gomes 	__releases(d->lock)
20055a781ccbSVinicius Costa Gomes 	__acquires(d->lock)
20065a781ccbSVinicius Costa Gomes {
20075a781ccbSVinicius Costa Gomes 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
20085a781ccbSVinicius Costa Gomes 
20095a781ccbSVinicius Costa Gomes 	sch = dev_queue->qdisc_sleeping;
201029cbcd85SAhmed S. Darwish 	if (gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 ||
20115dd431b6SPaolo Abeni 	    qdisc_qstats_copy(d, sch) < 0)
20125a781ccbSVinicius Costa Gomes 		return -1;
20135a781ccbSVinicius Costa Gomes 	return 0;
20145a781ccbSVinicius Costa Gomes }
20155a781ccbSVinicius Costa Gomes 
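/* Walk classes 1..num_tx_queues, resuming from arg->skip so that a
 * partially filled dump can continue where the previous one stopped.
 */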
20165a781ccbSVinicius Costa Gomes static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
20175a781ccbSVinicius Costa Gomes {
20185a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
20195a781ccbSVinicius Costa Gomes 	unsigned long ntx;
20205a781ccbSVinicius Costa Gomes 
20215a781ccbSVinicius Costa Gomes 	if (arg->stop)
20225a781ccbSVinicius Costa Gomes 		return;
20235a781ccbSVinicius Costa Gomes 
20245a781ccbSVinicius Costa Gomes 	arg->count = arg->skip;
20255a781ccbSVinicius Costa Gomes 	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
2026e046fa89SZhengchao Shao 		if (!tc_qdisc_stats_dump(sch, ntx + 1, arg))
20275a781ccbSVinicius Costa Gomes 			break;
20285a781ccbSVinicius Costa Gomes 	}
20295a781ccbSVinicius Costa Gomes }
20305a781ccbSVinicius Costa Gomes 
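/* When grafting, the parent's minor number selects the TX queue
 * directly.
 */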
20315a781ccbSVinicius Costa Gomes static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
20325a781ccbSVinicius Costa Gomes 						struct tcmsg *tcm)
20335a781ccbSVinicius Costa Gomes {
20345a781ccbSVinicius Costa Gomes 	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
20355a781ccbSVinicius Costa Gomes }
20365a781ccbSVinicius Costa Gomes 
20375a781ccbSVinicius Costa Gomes static const struct Qdisc_class_ops taprio_class_ops = {
20385a781ccbSVinicius Costa Gomes 	.graft		= taprio_graft,
20395a781ccbSVinicius Costa Gomes 	.leaf		= taprio_leaf,
20405a781ccbSVinicius Costa Gomes 	.find		= taprio_find,
20415a781ccbSVinicius Costa Gomes 	.walk		= taprio_walk,
20425a781ccbSVinicius Costa Gomes 	.dump		= taprio_dump_class,
20435a781ccbSVinicius Costa Gomes 	.dump_stats	= taprio_dump_class_stats,
20445a781ccbSVinicius Costa Gomes 	.select_queue	= taprio_select_queue,
20455a781ccbSVinicius Costa Gomes };
20465a781ccbSVinicius Costa Gomes 
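/* Illustrative configuration from user space (iproute2 syntax; a
 * sketch only -- the device name, times and priority mapping here
 * are assumptions, not requirements):
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *       num_tc 3 \
 *       map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *       queues 1@0 1@1 2@2 \
 *       base-time 1528743495910289987 \
 *       sched-entry S 01 300000 \
 *       sched-entry S 02 300000 \
 *       sched-entry S 04 400000 \
 *       clockid CLOCK_TAI
 */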
20475a781ccbSVinicius Costa Gomes static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
20485a781ccbSVinicius Costa Gomes 	.cl_ops		= &taprio_class_ops,
20495a781ccbSVinicius Costa Gomes 	.id		= "taprio",
20505a781ccbSVinicius Costa Gomes 	.priv_size	= sizeof(struct taprio_sched),
20515a781ccbSVinicius Costa Gomes 	.init		= taprio_init,
2052a3d43c0dSVinicius Costa Gomes 	.change		= taprio_change,
20535a781ccbSVinicius Costa Gomes 	.destroy	= taprio_destroy,
205444d4775cSDavide Caratti 	.reset		= taprio_reset,
205513511704SYannick Vignon 	.attach		= taprio_attach,
20565a781ccbSVinicius Costa Gomes 	.peek		= taprio_peek,
20575a781ccbSVinicius Costa Gomes 	.dequeue	= taprio_dequeue,
20585a781ccbSVinicius Costa Gomes 	.enqueue	= taprio_enqueue,
20595a781ccbSVinicius Costa Gomes 	.dump		= taprio_dump,
20605a781ccbSVinicius Costa Gomes 	.owner		= THIS_MODULE,
20615a781ccbSVinicius Costa Gomes };
20625a781ccbSVinicius Costa Gomes 
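/* Link-speed changes are tracked via this notifier so that the
 * per-device transmit-time estimate (picos_per_byte) stays accurate.
 */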
20637b9eba7bSLeandro Dorileo static struct notifier_block taprio_device_notifier = {
20647b9eba7bSLeandro Dorileo 	.notifier_call = taprio_dev_notifier,
20657b9eba7bSLeandro Dorileo };
20667b9eba7bSLeandro Dorileo 
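/* The notifier is registered before the qdisc ops so that link events
 * can never race with a half-registered qdisc; module exit (and the
 * failure path below) unwinds in the opposite order.
 */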
20675a781ccbSVinicius Costa Gomes static int __init taprio_module_init(void)
20685a781ccbSVinicius Costa Gomes {
20697b9eba7bSLeandro Dorileo 	int err = register_netdevice_notifier(&taprio_device_notifier);
20707b9eba7bSLeandro Dorileo 
20717b9eba7bSLeandro Dorileo 	if (err)
20727b9eba7bSLeandro Dorileo 		return err;
20737b9eba7bSLeandro Dorileo 
	/* Unwind the notifier if qdisc registration fails, so the
	 * freed module does not leave a dangling callback behind.
	 */
	err = register_qdisc(&taprio_qdisc_ops);
	if (err)
		unregister_netdevice_notifier(&taprio_device_notifier);

	return err;
20755a781ccbSVinicius Costa Gomes }
20765a781ccbSVinicius Costa Gomes 
20775a781ccbSVinicius Costa Gomes static void __exit taprio_module_exit(void)
20785a781ccbSVinicius Costa Gomes {
20795a781ccbSVinicius Costa Gomes 	unregister_qdisc(&taprio_qdisc_ops);
20807b9eba7bSLeandro Dorileo 	unregister_netdevice_notifier(&taprio_device_notifier);
20815a781ccbSVinicius Costa Gomes }
20825a781ccbSVinicius Costa Gomes 
20835a781ccbSVinicius Costa Gomes module_init(taprio_module_init);
20845a781ccbSVinicius Costa Gomes module_exit(taprio_module_exit);
20855a781ccbSVinicius Costa Gomes MODULE_LICENSE("GPL");