// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	 Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 *
 */

#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/time.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/tcp.h>

#include "sch_mqprio_lib.h"

static LIST_HEAD(taprio_list);
static struct static_key_false taprio_have_broken_mqprio;
static struct static_key_false taprio_have_working_mqprio;

#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX

struct sched_entry {
	/* Durations between this GCL entry and the GCL entry where the
	 * respective traffic class gate closes
	 */
	u64 gate_duration[TC_MAX_QUEUE];
	atomic_t budget[TC_MAX_QUEUE];
	/* The qdisc makes some effort so that no packet leaves
	 * after this time
	 */
	ktime_t gate_close_time[TC_MAX_QUEUE];
	struct list_head list;
	/* Used to calculate when to advance the schedule */
	ktime_t end_time;
	ktime_t next_txtime;
	int index;
	u32 gate_mask;
	u32 interval;
	u8 command;
};

struct sched_gate_list {
	/* Longest non-zero contiguous gate durations per traffic class,
	 * or 0 if a traffic class gate never opens during the schedule.
	 */
	u64 max_open_gate_duration[TC_MAX_QUEUE];
	struct rcu_head rcu;
	struct list_head entries;
	size_t num_entries;
	ktime_t cycle_end_time;
	s64 cycle_time;
	s64 cycle_time_extension;
	s64 base_time;
};
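
/* Illustrative example (not part of the original code): a schedule with
 * cycle_time = 100 us, two traffic classes and three entries such as
 *
 *   entry 0: interval = 30 us, gate_mask = 0x1 (TC0 open)
 *   entry 1: interval = 30 us, gate_mask = 0x3 (TC0 and TC1 open)
 *   entry 2: interval = 40 us, gate_mask = 0x2 (TC1 open)
 *
 * gives each entry per-TC gate_duration[] values describing how long the
 * gate stays open counting from that entry; see
 * taprio_calculate_gate_durations() below for the walked numbers.
 */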

struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	u32 flags;
	enum tk_offsets tk_offset;
	int clockid;
	bool offloaded;
	bool detected_mqprio;
	bool broken_mqprio;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds it's sub-nanoseconds per byte
				    */

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct sched_gate_list __rcu *oper_sched;
	struct sched_gate_list __rcu *admin_sched;
	struct hrtimer advance_timer;
	struct list_head taprio_list;
	int cur_txq[TC_MAX_QUEUE];
	u32 max_frm_len[TC_MAX_QUEUE]; /* for the fast path */
	u32 max_sdu[TC_MAX_QUEUE]; /* for dump and offloading */
	u32 txtime_delay;
};

struct __tc_taprio_qopt_offload {
	refcount_t users;
	struct tc_taprio_qopt_offload offload;
};

static void taprio_calculate_gate_durations(struct taprio_sched *q,
					    struct sched_gate_list *sched)
{
	struct net_device *dev = qdisc_dev(q->root);
	int num_tc = netdev_get_num_tc(dev);
	struct sched_entry *entry, *cur;
	int tc;

	list_for_each_entry(entry, &sched->entries, list) {
		u32 gates_still_open = entry->gate_mask;

		/* For each traffic class, calculate each open gate duration,
		 * starting at this schedule entry and ending at the schedule
		 * entry containing a gate close event for that TC.
		 */
		cur = entry;

		do {
			if (!gates_still_open)
				break;

			for (tc = 0; tc < num_tc; tc++) {
				if (!(gates_still_open & BIT(tc)))
					continue;

				if (cur->gate_mask & BIT(tc))
					entry->gate_duration[tc] += cur->interval;
				else
					gates_still_open &= ~BIT(tc);
			}

			cur = list_next_entry_circular(cur, &sched->entries, list);
		} while (cur != entry);

		/* Keep track of the maximum gate duration for each traffic
		 * class, taking care to not confuse a traffic class which is
		 * temporarily closed with one that is always closed.
		 */
		for (tc = 0; tc < num_tc; tc++)
			if (entry->gate_duration[tc] &&
			    sched->max_open_gate_duration[tc] < entry->gate_duration[tc])
				sched->max_open_gate_duration[tc] = entry->gate_duration[tc];
	}
}
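
/* For the example schedule sketched earlier: starting at entry 0, TC0
 * stays open through entries 0 and 1, so entry 0 gets gate_duration[0] =
 * 30 + 30 = 60 us; starting at entry 1, TC1 stays open through entries 1
 * and 2, so entry 1 gets gate_duration[1] = 30 + 40 = 70 us.
 * list_next_entry_circular() lets an open run wrap across the cycle
 * boundary, and a gate that is open in every entry simply accumulates
 * the sum of all intervals.
 */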

static bool taprio_entry_allows_tx(ktime_t skb_end_time,
				   struct sched_entry *entry, int tc)
{
	return ktime_before(skb_end_time, entry->gate_close_time[tc]);
}

static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
	if (!sched)
		return KTIME_MAX;

	return ns_to_ktime(sched->base_time);
}

static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)
{
	/* This pairs with WRITE_ONCE() in taprio_parse_clockid() */
	enum tk_offsets tk_offset = READ_ONCE(q->tk_offset);

	switch (tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, tk_offset);
	}
}
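
/* Clock domain sketch: if the qdisc was configured with, say, CLOCK_TAI,
 * taprio_parse_clockid() (further below in this file) stores TK_OFFS_TAI
 * in q->tk_offset and ktime_mono_to_any() shifts the monotonic timestamp
 * into that domain; TK_OFFS_MAX acts as a sentinel meaning "no
 * conversion needed" (plain CLOCK_MONOTONIC).
 */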

static ktime_t taprio_get_time(const struct taprio_sched *q)
{
	return taprio_mono_to_any(q, ktime_get());
}

static void taprio_free_sched_cb(struct rcu_head *head)
{
	struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
	struct sched_entry *entry, *n;

	list_for_each_entry_safe(entry, n, &sched->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	kfree(sched);
}

static void switch_schedules(struct taprio_sched *q,
			     struct sched_gate_list **admin,
			     struct sched_gate_list **oper)
{
	rcu_assign_pointer(q->oper_sched, *admin);
	rcu_assign_pointer(q->admin_sched, NULL);

	if (*oper)
		call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

	*oper = *admin;
	*admin = NULL;
}

/* Get how much time has already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
	ktime_t time_since_sched_start;
	s32 time_elapsed;

	time_since_sched_start = ktime_sub(time, sched->base_time);
	div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

	return time_elapsed;
}
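
/* Worked example with illustrative numbers: base_time = 0 and
 * cycle_time = 1000000 ns (1 ms) with time = 2500000 ns gives
 * time_since_sched_start = 2500000 and a remainder of 500000 ns,
 * i.e. we are halfway through the third cycle.
 */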

static ktime_t get_interval_end_time(struct sched_gate_list *sched,
				     struct sched_gate_list *admin,
				     struct sched_entry *entry,
				     ktime_t intv_start)
{
	s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
	ktime_t intv_end, cycle_ext_end, cycle_end;

	cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
	intv_end = ktime_add_ns(intv_start, entry->interval);
	cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

	if (ktime_before(intv_end, cycle_end))
		return intv_end;
	else if (admin && admin != sched &&
		 ktime_after(admin->base_time, cycle_end) &&
		 ktime_before(admin->base_time, cycle_ext_end))
		return admin->base_time;
	else
		return cycle_end;
}

static int length_to_duration(struct taprio_sched *q, int len)
{
	return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC);
}
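
/* Duration sketch: picos_per_byte is derived from the link speed, e.g.
 * 8000 ps/byte at 1 Gbps, so a 1500 byte frame takes
 * 1500 * 8000 / PSEC_PER_NSEC = 12000 ns (12 us) on the wire, ignoring
 * framing overhead.
 */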

/* Returns the entry corresponding to the next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
						  struct Qdisc *sch,
						  struct sched_gate_list *sched,
						  struct sched_gate_list *admin,
						  ktime_t time,
						  ktime_t *interval_start,
						  ktime_t *interval_end,
						  bool validate_interval)
{
	ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
	ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
	struct sched_entry *entry = NULL, *entry_found = NULL;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	bool entry_available = false;
	s32 cycle_elapsed;
	int tc, n;

	tc = netdev_get_prio_tc_map(dev, skb->priority);
	packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

	*interval_start = 0;
	*interval_end = 0;

	if (!sched)
		return NULL;

	cycle = sched->cycle_time;
	cycle_elapsed = get_cycle_time_elapsed(sched, time);
	curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
	cycle_end = ktime_add_ns(curr_intv_end, cycle);

	list_for_each_entry(entry, &sched->entries, list) {
		curr_intv_start = curr_intv_end;
		curr_intv_end = get_interval_end_time(sched, admin, entry,
						      curr_intv_start);

		if (ktime_after(curr_intv_start, cycle_end))
			break;

		if (!(entry->gate_mask & BIT(tc)) ||
		    packet_transmit_time > entry->interval)
			continue;

		txtime = entry->next_txtime;

		if (ktime_before(txtime, time) || validate_interval) {
			transmit_end_time = ktime_add_ns(time, packet_transmit_time);
			if ((ktime_before(curr_intv_start, time) &&
			     ktime_before(transmit_end_time, curr_intv_end)) ||
			    (ktime_after(curr_intv_start, time) && !validate_interval)) {
				entry_found = entry;
				*interval_start = curr_intv_start;
				*interval_end = curr_intv_end;
				break;
			} else if (!entry_available && !validate_interval) {
				/* Here, we are just trying to find out the
				 * first available interval in the next cycle.
				 */
				entry_available = true;
				entry_found = entry;
				*interval_start = ktime_add_ns(curr_intv_start, cycle);
				*interval_end = ktime_add_ns(curr_intv_end, cycle);
			}
		} else if (ktime_before(txtime, earliest_txtime) &&
			   !entry_available) {
			earliest_txtime = txtime;
			entry_found = entry;
			n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
			*interval_start = ktime_add(curr_intv_start, n * cycle);
			*interval_end = ktime_add(curr_intv_end, n * cycle);
		}
	}

	return entry_found;
}

static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t interval_start, interval_end;
	struct sched_entry *entry;

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
				       &interval_start, &interval_end, true);
	rcu_read_unlock();

	return entry;
}

static bool taprio_flags_valid(u32 flags)
{
	/* Make sure no other flag bits are set. */
	if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
		      TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	/* txtime-assist and full offload are mutually exclusive */
	if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
	    (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	return true;
}
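
/* Usage sketch (assuming the uapi values BIT(0) for txtime-assist and
 * BIT(1) for full offload, and an iproute2 build with taprio support):
 *
 *   tc qdisc replace dev eth0 parent root taprio ... flags 0x1
 *
 * enables txtime-assist, "flags 0x2" requests full offload, and
 * "flags 0x3" is rejected here because the two modes are mutually
 * exclusive.
 */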

/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
	unsigned int offset = skb_network_offset(skb);
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	struct ipv6hdr _ipv6h;

	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
	if (!ipv6h)
		return 0;

	if (ipv6h->version == 4) {
		iph = (struct iphdr *)ipv6h;
		offset += iph->ihl * 4;

		/* special-case 6in4 tunnelling, as that is a common way to get
		 * v6 connectivity in the home
		 */
		if (iph->protocol == IPPROTO_IPV6) {
			ipv6h = skb_header_pointer(skb, offset,
						   sizeof(_ipv6h), &_ipv6h);

			if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
				return 0;
		} else if (iph->protocol != IPPROTO_TCP) {
			return 0;
		}
	} else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
		return 0;
	}

	return taprio_mono_to_any(q, skb->skb_mstamp_ns);
}

/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and packet can be
 *       transmitted before it closes, schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open later
 *       in the cycle, set the txtime of packet to the interval start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
	ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t minimum_time, now, txtime;
	int len, packet_transmit_time;
	struct sched_entry *entry;
	bool sched_changed;

	now = taprio_get_time(q);
	minimum_time = ktime_add_ns(now, q->txtime_delay);

	tcp_tstamp = get_tcp_tstamp(q, skb);
	minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

	rcu_read_lock();
	admin = rcu_dereference(q->admin_sched);
	sched = rcu_dereference(q->oper_sched);
	if (admin && ktime_after(minimum_time, admin->base_time))
		switch_schedules(q, &admin, &sched);

	/* Until the schedule starts, all the queues are open */
	if (!sched || ktime_before(minimum_time, sched->base_time)) {
		txtime = minimum_time;
		goto done;
	}

	len = qdisc_pkt_len(skb);
	packet_transmit_time = length_to_duration(q, len);

	do {
		sched_changed = false;

		entry = find_entry_to_transmit(skb, sch, sched, admin,
					       minimum_time,
					       &interval_start, &interval_end,
					       false);
		if (!entry) {
			txtime = 0;
			goto done;
		}

		txtime = entry->next_txtime;
		txtime = max_t(ktime_t, txtime, minimum_time);
		txtime = max_t(ktime_t, txtime, interval_start);

		if (admin && admin != sched &&
		    ktime_after(txtime, admin->base_time)) {
			sched = admin;
			sched_changed = true;
			continue;
		}

		transmit_end_time = ktime_add(txtime, packet_transmit_time);
		minimum_time = transmit_end_time;

		/* Update the txtime of the current entry to the next time its
		 * interval starts.
		 */
		if (ktime_after(transmit_end_time, interval_end))
			entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
	} while (sched_changed || ktime_after(transmit_end_time, interval_end));

	entry->next_txtime = transmit_end_time;

done:
	rcu_read_unlock();
	return txtime;
}
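
/* txtime-assist sketch with illustrative numbers: with txtime_delay =
 * 50 us and the skb's gate open from now + 30 us for 100 us,
 * minimum_time = now + 50 us already falls inside the open interval, so
 * the skb gets txtime = max(entry->next_txtime, minimum_time,
 * interval_start) and next_txtime advances past the frame's transmit
 * duration. If the remaining window were too short, the loop above
 * would retry in the next open interval (or in the admin schedule once
 * txtime crosses its base_time).
 */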

static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
			      struct Qdisc *child, struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int prio = skb->priority;
	u8 tc;

	/* sk_flags are only safe to use on full sockets. */
	if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
		if (!is_valid_interval(skb, sch))
			return qdisc_drop(skb, sch, to_free);
	} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		skb->tstamp = get_packet_txtime(skb, sch);
		if (!skb->tstamp)
			return qdisc_drop(skb, sch, to_free);
	}

	/* Devices with full offload are expected to honor this in hardware */
	tc = netdev_get_prio_tc_map(dev, prio);
	if (skb->len > q->max_frm_len[tc])
		return qdisc_drop(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return qdisc_enqueue(skb, child, to_free);
}

/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct Qdisc *child;
	int queue;

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	/* Large packets might not be transmitted when the transmission duration
	 * exceeds any configured interval. Therefore, segment the skb into
	 * smaller chunks. Drivers with full offload are expected to handle
	 * this in hardware.
	 */
	if (skb_is_gso(skb)) {
		unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
		netdev_features_t features = netif_skb_features(skb);
		struct sk_buff *segs, *nskb;
		int ret;

		segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
		if (IS_ERR_OR_NULL(segs))
			return qdisc_drop(skb, sch, to_free);

		skb_list_walk_safe(segs, segs, nskb) {
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			slen += segs->len;

			ret = taprio_enqueue_one(segs, sch, child, to_free);
			if (ret != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(ret))
					qdisc_qstats_drop(sch);
			} else {
				numsegs++;
			}
		}

		if (numsegs > 1)
			qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
		consume_skb(skb);

		return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
	}

	return taprio_enqueue_one(skb, sch, child, to_free);
}
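
/* Accounting sketch for the GSO path above, with illustrative numbers:
 * each segment is charged to the qdisc individually, so one 3000 byte
 * GSO skb that becomes two 1514 byte segments is a net change of
 * +1 packet and 3028 - 3000 = +28 bytes versus what the caller
 * accounted for; qdisc_tree_reduce_backlog(sch, 1 - numsegs,
 * len - slen) propagates exactly that difference.
 */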

static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	WARN_ONCE(1, "taprio only supports operating as root qdisc, peek() not implemented");
	return NULL;
}

static void taprio_set_budgets(struct taprio_sched *q,
			       struct sched_gate_list *sched,
			       struct sched_entry *entry)
{
	struct net_device *dev = qdisc_dev(q->root);
	int num_tc = netdev_get_num_tc(dev);
	int tc, budget;

	for (tc = 0; tc < num_tc; tc++) {
		/* Traffic classes which never close have infinite budget */
		if (entry->gate_duration[tc] == sched->cycle_time)
			budget = INT_MAX;
		else
			budget = div64_u64((u64)entry->gate_duration[tc] * PSEC_PER_NSEC,
					   atomic64_read(&q->picos_per_byte));

		atomic_set(&entry->budget[tc], budget);
	}
}

/* When an skb is sent, it consumes from the budget of all traffic classes */
static int taprio_update_budgets(struct sched_entry *entry, size_t len,
				 int tc_consumed, int num_tc)
{
	int tc, budget, new_budget = 0;

	for (tc = 0; tc < num_tc; tc++) {
		budget = atomic_read(&entry->budget[tc]);
		/* Don't consume from infinite budget */
		if (budget == INT_MAX) {
			if (tc == tc_consumed)
				new_budget = budget;
			continue;
		}

		if (tc == tc_consumed)
			new_budget = atomic_sub_return(len, &entry->budget[tc]);
		else
			atomic_sub(len, &entry->budget[tc]);
	}

	return new_budget;
}
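
/* Budget sketch with illustrative numbers: at 1 Gbps (8000 ps/byte), a
 * TC whose gate_duration for the entry is 60 us gets a budget of
 * 60000 * 1000 / 8000 = 7500 bytes. Every transmitted skb is
 * subtracted from all finite per-TC budgets, and dequeueing for a TC
 * stops once its budget would go negative (see
 * taprio_dequeue_from_txq() below).
 */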

static struct sk_buff *taprio_dequeue_from_txq(struct Qdisc *sch, int txq,
					       struct sched_entry *entry,
					       u32 gate_mask)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *child = q->qdiscs[txq];
	int num_tc = netdev_get_num_tc(dev);
	struct sk_buff *skb;
	ktime_t guard;
	int prio;
	int len;
	u8 tc;

	if (unlikely(!child))
		return NULL;

	if (TXTIME_ASSIST_IS_ENABLED(q->flags))
		goto skip_peek_checks;

	skb = child->ops->peek(child);
	if (!skb)
		return NULL;

	prio = skb->priority;
	tc = netdev_get_prio_tc_map(dev, prio);

	if (!(gate_mask & BIT(tc)))
		return NULL;

	len = qdisc_pkt_len(skb);
	guard = ktime_add_ns(taprio_get_time(q), length_to_duration(q, len));

	/* In the case that there's no gate entry, there's no
	 * guard band ...
	 */
	if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
	    !taprio_entry_allows_tx(guard, entry, tc))
		return NULL;

	/* ... and no budget. */
	if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
	    taprio_update_budgets(entry, len, tc, num_tc) < 0)
		return NULL;

skip_peek_checks:
	skb = child->ops->dequeue(child);
	if (unlikely(!skb))
		return NULL;

	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;

	return skb;
}

static void taprio_next_tc_txq(struct net_device *dev, int tc, int *txq)
{
	int offset = dev->tc_to_txq[tc].offset;
	int count = dev->tc_to_txq[tc].count;

	(*txq)++;
	if (*txq == offset + count)
		*txq = offset;
}
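
/* Example: with dev->tc_to_txq[tc] = { .offset = 2, .count = 2 },
 * successive calls walk the TC's TXQs as 2, 3, 2, 3, ... so both
 * queues get a fair shot at each dequeue opportunity.
 */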

/* Prioritize higher traffic classes, and select among TXQs belonging to the
 * same TC using round robin
 */
static struct sk_buff *taprio_dequeue_tc_priority(struct Qdisc *sch,
						  struct sched_entry *entry,
						  u32 gate_mask)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int num_tc = netdev_get_num_tc(dev);
	struct sk_buff *skb;
	int tc;

	for (tc = num_tc - 1; tc >= 0; tc--) {
		int first_txq = q->cur_txq[tc];

		if (!(gate_mask & BIT(tc)))
			continue;

		do {
			skb = taprio_dequeue_from_txq(sch, q->cur_txq[tc],
						      entry, gate_mask);

			taprio_next_tc_txq(dev, tc, &q->cur_txq[tc]);

			if (skb)
				return skb;
		} while (q->cur_txq[tc] != first_txq);
	}

	return NULL;
}

/* Broken way of prioritizing smaller TXQ indices and ignoring the traffic
 * class other than to determine whether the gate is open or not
 */
static struct sk_buff *taprio_dequeue_txq_priority(struct Qdisc *sch,
						   struct sched_entry *entry,
						   u32 gate_mask)
{
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		skb = taprio_dequeue_from_txq(sch, i, entry, gate_mask);
		if (skb)
			return skb;
	}

	return NULL;
}

/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	struct sched_entry *entry;
	u32 gate_mask;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	/* If there's no entry, the schedule didn't start yet, so force
	 * all gates to be open; this is in accordance with IEEE
	 * 802.1Qbv-2015 Section 8.6.9.4.5 "AdminGateStates"
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	if (!gate_mask)
		goto done;

	if (static_branch_unlikely(&taprio_have_broken_mqprio) &&
	    !static_branch_likely(&taprio_have_working_mqprio)) {
		/* Single NIC kind which is broken */
		skb = taprio_dequeue_txq_priority(sch, entry, gate_mask);
	} else if (static_branch_likely(&taprio_have_working_mqprio) &&
		   !static_branch_unlikely(&taprio_have_broken_mqprio)) {
		/* Single NIC kind which prioritizes properly */
		skb = taprio_dequeue_tc_priority(sch, entry, gate_mask);
	} else {
		/* Mixed NIC kinds present in system, need dynamic testing */
		if (q->broken_mqprio)
			skb = taprio_dequeue_txq_priority(sch, entry, gate_mask);
		else
			skb = taprio_dequeue_tc_priority(sch, entry, gate_mask);
	}

done:
	rcu_read_unlock();

	return skb;
}

static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{
	if (list_is_last(&entry->list, &oper->entries))
		return true;

	if (ktime_compare(entry->end_time, oper->cycle_end_time) == 0)
		return true;

	return false;
}

static bool should_change_schedules(const struct sched_gate_list *admin,
				    const struct sched_gate_list *oper,
				    ktime_t end_time)
{
	ktime_t next_base_time, extension_time;

	if (!admin)
		return false;

	next_base_time = sched_base_time(admin);

	/* This is the simple case, the end_time would fall after
	 * the next schedule base_time.
	 */
	if (ktime_compare(next_base_time, end_time) <= 0)
		return true;

	/* This is the cycle_time_extension case, if the end_time
	 * plus the amount that can be extended would fall after the
	 * next schedule base_time, we can extend the current schedule
	 * for that amount.
	 */
	extension_time = ktime_add_ns(end_time, oper->cycle_time_extension);

	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
	 * how precisely the extension should be made. So after
	 * conformance testing, this logic may change.
	 */
	if (ktime_compare(next_base_time, extension_time) <= 0)
		return true;

	return false;
}
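
/* Extension example with illustrative numbers: if the current entry
 * would end at t = 100 us, oper->cycle_time_extension is 20 us and the
 * admin schedule's base_time is t = 110 us, then 110 <= 100 + 20, so we
 * switch schedules and advance_sched() stretches the last operating
 * interval up to the new schedule's start.
 */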

static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct net_device *dev = qdisc_dev(q->root);
	struct sched_gate_list *oper, *admin;
	int num_tc = netdev_get_num_tc(dev);
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;
	ktime_t end_time;
	int tc;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));
	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	if (!oper)
		switch_schedules(q, &admin, &oper);

	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. the previous schedule just ended. The first
	 * entry of each schedule is pre-calculated during schedule
	 * initialization.
	 */
	if (unlikely(!entry || entry->end_time == oper->base_time)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		end_time = next->end_time;
		goto first_run;
	}

	if (should_restart_cycle(oper, entry)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		oper->cycle_end_time = ktime_add_ns(oper->cycle_end_time,
						    oper->cycle_time);
	} else {
		next = list_next_entry(entry, list);
	}

	end_time = ktime_add_ns(entry->end_time, next->interval);
	end_time = min_t(ktime_t, end_time, oper->cycle_end_time);

	for (tc = 0; tc < num_tc; tc++) {
		if (next->gate_duration[tc] == oper->cycle_time)
			next->gate_close_time[tc] = KTIME_MAX;
		else
			next->gate_close_time[tc] = ktime_add_ns(entry->end_time,
								 next->gate_duration[tc]);
	}

	if (should_change_schedules(admin, oper, end_time)) {
		/* Set things so the next time this runs, the new
		 * schedule runs.
		 */
		end_time = sched_base_time(admin);
		switch_schedules(q, &admin, &oper);
	}

	next->end_time = end_time;
	taprio_set_budgets(q, oper, next);

first_run:
	rcu_assign_pointer(q->current_entry, next);
	spin_unlock(&q->current_entry_lock);

	hrtimer_set_expires(&q->advance_timer, end_time);

	rcu_read_lock();
	__netif_schedule(sch);
	rcu_read_unlock();

	return HRTIMER_RESTART;
}

static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY_INDEX]	   = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_CMD]	   = { .type = NLA_U8 },
	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
};

static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_TC_ENTRY_INDEX]	   = { .type = NLA_U32 },
	[TCA_TAPRIO_TC_ENTRY_MAX_SDU]	   = { .type = NLA_U32 },
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
	[TCA_TAPRIO_ATTR_PRIOMAP]	       = {
		.len = sizeof(struct tc_mqprio_qopt)
	},
	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]           = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]            = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]         = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_FLAGS]                      = { .type = NLA_U32 },
	[TCA_TAPRIO_ATTR_TXTIME_DELAY]		     = { .type = NLA_U32 },
	[TCA_TAPRIO_ATTR_TC_ENTRY]		     = { .type = NLA_NESTED },
};

static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
			    struct sched_entry *entry,
			    struct netlink_ext_ack *extack)
{
	int min_duration = length_to_duration(q, ETH_ZLEN);
	u32 interval = 0;

	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
		entry->command = nla_get_u8(
			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
		entry->gate_mask = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
		interval = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

	/* The interval should allow at least the minimum ethernet
	 * frame to go out.
	 */
	if (interval < min_duration) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	return 0;
}

static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
			     struct sched_entry *entry, int index,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
					  entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_sched_entry(q, tb, entry, extack);
}

static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
			    struct sched_gate_list *sched,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list)
		return -EINVAL;

	nla_for_each_nested(n, list, rem) {
		struct sched_entry *entry;

		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			return -ENOMEM;
		}

		err = parse_sched_entry(q, n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			return err;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;
}

static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
				 struct sched_gate_list *new,
				 struct netlink_ext_ack *extack)
{
	int err = 0;

	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
		NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
		return -ENOTSUPP;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
		err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
				       new, extack);
	if (err < 0)
		return err;

	if (!new->cycle_time) {
		struct sched_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &new->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);
1055ed8157f1SDu Cheng 
1056ed8157f1SDu Cheng 		if (!cycle) {
1057ed8157f1SDu Cheng 			NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
1058ed8157f1SDu Cheng 			return -EINVAL;
1059ed8157f1SDu Cheng 		}
1060ed8157f1SDu Cheng 
1061037be037SVedang Patel 		new->cycle_time = cycle;
1062037be037SVedang Patel 	}
1063037be037SVedang Patel 
1064a306a90cSVladimir Oltean 	taprio_calculate_gate_durations(q, new);
1065a306a90cSVladimir Oltean 
1066a3d43c0dSVinicius Costa Gomes 	return 0;
10675a781ccbSVinicius Costa Gomes }
10685a781ccbSVinicius Costa Gomes 
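/* Illustration of the cycle_time fallback above: if the user omits
 * TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME and the schedule has three entries
 * with intervals of 300 us, 300 us and 400 us, the cycle time is
 * derived as their sum, 1 ms. Since fill_sched_entry() already
 * rejects degenerate intervals, the -EINVAL above is a defensive
 * check against a zero-length cycle.
 */
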
10695a781ccbSVinicius Costa Gomes static int taprio_parse_mqprio_opt(struct net_device *dev,
10705a781ccbSVinicius Costa Gomes 				   struct tc_mqprio_qopt *qopt,
10714cfd5779SVedang Patel 				   struct netlink_ext_ack *extack,
10724cfd5779SVedang Patel 				   u32 taprio_flags)
10735a781ccbSVinicius Costa Gomes {
10741dfe086dSVladimir Oltean 	bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags);
10755a781ccbSVinicius Costa Gomes 
1076a3d43c0dSVinicius Costa Gomes 	if (!qopt && !dev->num_tc) {
10775a781ccbSVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
10785a781ccbSVinicius Costa Gomes 		return -EINVAL;
10795a781ccbSVinicius Costa Gomes 	}
10805a781ccbSVinicius Costa Gomes 
1081a3d43c0dSVinicius Costa Gomes 	/* If num_tc is already set, it means that the user already
1082a3d43c0dSVinicius Costa Gomes 	 * configured the mqprio part
1083a3d43c0dSVinicius Costa Gomes 	 */
1084a3d43c0dSVinicius Costa Gomes 	if (dev->num_tc)
1085a3d43c0dSVinicius Costa Gomes 		return 0;
1086a3d43c0dSVinicius Costa Gomes 
10875a781ccbSVinicius Costa Gomes 	/* taprio imposes that traffic classes map 1:n to tx queues */
10885a781ccbSVinicius Costa Gomes 	if (qopt->num_tc > dev->num_tx_queues) {
10895a781ccbSVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
10905a781ccbSVinicius Costa Gomes 		return -EINVAL;
10915a781ccbSVinicius Costa Gomes 	}
10925a781ccbSVinicius Costa Gomes 
10931dfe086dSVladimir Oltean 	/* For some reason, in txtime-assist mode, we allow TXQ ranges for
10941dfe086dSVladimir Oltean 	 * different TCs to overlap, and just validate the TXQ ranges.
10955a781ccbSVinicius Costa Gomes 	 */
10961dfe086dSVladimir Oltean 	return mqprio_validate_qopt(dev, qopt, true, allow_overlapping_txqs,
10971dfe086dSVladimir Oltean 				    extack);
10985a781ccbSVinicius Costa Gomes }
10995a781ccbSVinicius Costa Gomes 
1100a3d43c0dSVinicius Costa Gomes static int taprio_get_start_time(struct Qdisc *sch,
1101a3d43c0dSVinicius Costa Gomes 				 struct sched_gate_list *sched,
1102a3d43c0dSVinicius Costa Gomes 				 ktime_t *start)
11035a781ccbSVinicius Costa Gomes {
11045a781ccbSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
11055a781ccbSVinicius Costa Gomes 	ktime_t now, base, cycle;
11065a781ccbSVinicius Costa Gomes 	s64 n;
11075a781ccbSVinicius Costa Gomes 
1108a3d43c0dSVinicius Costa Gomes 	base = sched_base_time(sched);
11097ede7b03SVedang Patel 	now = taprio_get_time(q);
11108599099fSAndre Guedes 
11118599099fSAndre Guedes 	if (ktime_after(base, now)) {
11128599099fSAndre Guedes 		*start = base;
11138599099fSAndre Guedes 		return 0;
11148599099fSAndre Guedes 	}
11155a781ccbSVinicius Costa Gomes 
1116037be037SVedang Patel 	cycle = sched->cycle_time;
11175a781ccbSVinicius Costa Gomes 
11188599099fSAndre Guedes 	/* The qdisc is expected to have at least one sched_entry.  Moreover,
11198599099fSAndre Guedes 	 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
11208599099fSAndre Guedes 	 * something went really wrong. In that case, we should warn about this
11218599099fSAndre Guedes 	 * inconsistent state and return error.
11228599099fSAndre Guedes 	 * inconsistent state and return an error.
11238599099fSAndre Guedes 	if (WARN_ON(!cycle))
11248599099fSAndre Guedes 		return -EFAULT;
11255a781ccbSVinicius Costa Gomes 
11265a781ccbSVinicius Costa Gomes 	/* Schedule the start time for the beginning of the next
11275a781ccbSVinicius Costa Gomes 	 * cycle.
11285a781ccbSVinicius Costa Gomes 	 */
11295a781ccbSVinicius Costa Gomes 	n = div64_s64(ktime_sub_ns(now, base), cycle);
11308599099fSAndre Guedes 	*start = ktime_add_ns(base, (n + 1) * cycle);
11318599099fSAndre Guedes 	return 0;
11325a781ccbSVinicius Costa Gomes }
11335a781ccbSVinicius Costa Gomes 
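/* Worked example for taprio_get_start_time(): with base = 1000 us in
 * the past, now = 3500 us and cycle = 1000 us, then n = (3500 - 1000)
 * / 1000 = 2 full cycles have elapsed, so the start time becomes
 * base + (n + 1) * cycle = 4000 us, the beginning of the next cycle.
 */
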
1134e5517551SVladimir Oltean static void setup_first_end_time(struct taprio_sched *q,
1135a3d43c0dSVinicius Costa Gomes 				 struct sched_gate_list *sched, ktime_t base)
11365a781ccbSVinicius Costa Gomes {
1137*a1e6ad30SVladimir Oltean 	struct net_device *dev = qdisc_dev(q->root);
1138*a1e6ad30SVladimir Oltean 	int num_tc = netdev_get_num_tc(dev);
11395a781ccbSVinicius Costa Gomes 	struct sched_entry *first;
11406ca6a665SVinicius Costa Gomes 	ktime_t cycle;
1141*a1e6ad30SVladimir Oltean 	int tc;
11425a781ccbSVinicius Costa Gomes 
1143a3d43c0dSVinicius Costa Gomes 	first = list_first_entry(&sched->entries,
1144a3d43c0dSVinicius Costa Gomes 				 struct sched_entry, list);
11455a781ccbSVinicius Costa Gomes 
1146037be037SVedang Patel 	cycle = sched->cycle_time;
11476ca6a665SVinicius Costa Gomes 
11486ca6a665SVinicius Costa Gomes 	/* FIXME: find a better place to do this */
1149e5517551SVladimir Oltean 	sched->cycle_end_time = ktime_add_ns(base, cycle);
11506ca6a665SVinicius Costa Gomes 
1151e5517551SVladimir Oltean 	first->end_time = ktime_add_ns(base, first->interval);
1152d2ad689dSVladimir Oltean 	taprio_set_budgets(q, sched, first);
1153*a1e6ad30SVladimir Oltean 
1154*a1e6ad30SVladimir Oltean 	for (tc = 0; tc < num_tc; tc++) {
1155*a1e6ad30SVladimir Oltean 		if (first->gate_duration[tc] == sched->cycle_time)
1156*a1e6ad30SVladimir Oltean 			first->gate_close_time[tc] = KTIME_MAX;
1157*a1e6ad30SVladimir Oltean 		else
1158*a1e6ad30SVladimir Oltean 			first->gate_close_time[tc] = ktime_add_ns(base, first->gate_duration[tc]);
1159*a1e6ad30SVladimir Oltean 	}
1160*a1e6ad30SVladimir Oltean 
11615a781ccbSVinicius Costa Gomes 	rcu_assign_pointer(q->current_entry, NULL);
1162a3d43c0dSVinicius Costa Gomes }
11635a781ccbSVinicius Costa Gomes 
1164a3d43c0dSVinicius Costa Gomes static void taprio_start_sched(struct Qdisc *sch,
1165a3d43c0dSVinicius Costa Gomes 			       ktime_t start, struct sched_gate_list *new)
1166a3d43c0dSVinicius Costa Gomes {
1167a3d43c0dSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
1168a3d43c0dSVinicius Costa Gomes 	ktime_t expires;
1169a3d43c0dSVinicius Costa Gomes 
11709c66d156SVinicius Costa Gomes 	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
11719c66d156SVinicius Costa Gomes 		return;
11729c66d156SVinicius Costa Gomes 
1173a3d43c0dSVinicius Costa Gomes 	expires = hrtimer_get_expires(&q->advance_timer);
1174a3d43c0dSVinicius Costa Gomes 	if (expires == 0)
1175a3d43c0dSVinicius Costa Gomes 		expires = KTIME_MAX;
1176a3d43c0dSVinicius Costa Gomes 
1177a3d43c0dSVinicius Costa Gomes 	/* If the new schedule starts before the next expiration, we
1178a3d43c0dSVinicius Costa Gomes 	 * reprogram it to the earliest one, so we change the admin
1179a3d43c0dSVinicius Costa Gomes 	 * schedule to the operational one at the right time.
1180a3d43c0dSVinicius Costa Gomes 	 */
1181a3d43c0dSVinicius Costa Gomes 	start = min_t(ktime_t, start, expires);
11825a781ccbSVinicius Costa Gomes 
11835a781ccbSVinicius Costa Gomes 	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
11845a781ccbSVinicius Costa Gomes }
11855a781ccbSVinicius Costa Gomes 
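/* Example of the re-arming logic in taprio_start_sched(): if the
 * advance timer is already programmed for t = 5 s on behalf of the
 * operational schedule but the new admin schedule starts at t = 3 s,
 * the timer is re-armed for t = 3 s (the earlier of the two), so
 * neither deadline is missed.
 */
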
11867b9eba7bSLeandro Dorileo static void taprio_set_picos_per_byte(struct net_device *dev,
11877b9eba7bSLeandro Dorileo 				      struct taprio_sched *q)
11887b9eba7bSLeandro Dorileo {
11897b9eba7bSLeandro Dorileo 	struct ethtool_link_ksettings ecmd;
1190f04b514cSVladimir Oltean 	int speed = SPEED_10;
1191f04b514cSVladimir Oltean 	int picos_per_byte;
1192f04b514cSVladimir Oltean 	int err;
11937b9eba7bSLeandro Dorileo 
1194f04b514cSVladimir Oltean 	err = __ethtool_get_link_ksettings(dev, &ecmd);
1195f04b514cSVladimir Oltean 	if (err < 0)
1196f04b514cSVladimir Oltean 		goto skip;
1197f04b514cSVladimir Oltean 
11989a9251a3SVladimir Oltean 	if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
1199f04b514cSVladimir Oltean 		speed = ecmd.base.speed;
1200f04b514cSVladimir Oltean 
1201f04b514cSVladimir Oltean skip:
120268ce6688SVladimir Oltean 	picos_per_byte = (USEC_PER_SEC * 8) / speed;
12037b9eba7bSLeandro Dorileo 
12047b9eba7bSLeandro Dorileo 	atomic64_set(&q->picos_per_byte, picos_per_byte);
12057b9eba7bSLeandro Dorileo 	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
12067b9eba7bSLeandro Dorileo 		   dev->name, (long long)atomic64_read(&q->picos_per_byte),
12077b9eba7bSLeandro Dorileo 		   ecmd.base.speed);
12087b9eba7bSLeandro Dorileo }
12097b9eba7bSLeandro Dorileo 
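/* The computation above is plain unit arithmetic: with the link speed
 * in Mb/s, the time to transmit one byte is 8 / speed microseconds,
 * i.e. (USEC_PER_SEC * 8) / speed picoseconds. At the SPEED_10
 * fallback this yields (1000000 * 8) / 10 = 800000 ps (800 ns) per
 * byte; at 1 Gb/s it yields 8000 ps (8 ns) per byte.
 */
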
12107b9eba7bSLeandro Dorileo static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
12117b9eba7bSLeandro Dorileo 			       void *ptr)
12127b9eba7bSLeandro Dorileo {
12137b9eba7bSLeandro Dorileo 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
12147b9eba7bSLeandro Dorileo 	struct taprio_sched *q;
12157b9eba7bSLeandro Dorileo 
12167b9eba7bSLeandro Dorileo 	ASSERT_RTNL();
12177b9eba7bSLeandro Dorileo 
12187b9eba7bSLeandro Dorileo 	if (event != NETDEV_UP && event != NETDEV_CHANGE)
12197b9eba7bSLeandro Dorileo 		return NOTIFY_DONE;
12207b9eba7bSLeandro Dorileo 
12217b9eba7bSLeandro Dorileo 	list_for_each_entry(q, &taprio_list, taprio_list) {
1222fc4f2fd0SVladimir Oltean 		if (dev != qdisc_dev(q->root))
1223fc4f2fd0SVladimir Oltean 			continue;
1224fc4f2fd0SVladimir Oltean 
1225fc4f2fd0SVladimir Oltean 		taprio_set_picos_per_byte(dev, q);
12267b9eba7bSLeandro Dorileo 		break;
12277b9eba7bSLeandro Dorileo 	}
12287b9eba7bSLeandro Dorileo 
12297b9eba7bSLeandro Dorileo 	return NOTIFY_DONE;
12307b9eba7bSLeandro Dorileo }
12317b9eba7bSLeandro Dorileo 
12324cfd5779SVedang Patel static void setup_txtime(struct taprio_sched *q,
12334cfd5779SVedang Patel 			 struct sched_gate_list *sched, ktime_t base)
12344cfd5779SVedang Patel {
12354cfd5779SVedang Patel 	struct sched_entry *entry;
12364cfd5779SVedang Patel 	u32 interval = 0;
12374cfd5779SVedang Patel 
12384cfd5779SVedang Patel 	list_for_each_entry(entry, &sched->entries, list) {
12394cfd5779SVedang Patel 		entry->next_txtime = ktime_add_ns(base, interval);
12404cfd5779SVedang Patel 		interval += entry->interval;
12414cfd5779SVedang Patel 	}
12424cfd5779SVedang Patel }
12434cfd5779SVedang Patel 
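/* setup_txtime() accumulates the entry intervals: with base = 0 and
 * entries of 300 us, 300 us and 400 us, the entries are assigned
 * next_txtime values of 0, 300 us and 600 us respectively, i.e. each
 * entry's first transmission window opens where the previous one
 * closes.
 */
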
12449c66d156SVinicius Costa Gomes static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
12459c66d156SVinicius Costa Gomes {
12469c66d156SVinicius Costa Gomes 	struct __tc_taprio_qopt_offload *__offload;
12479c66d156SVinicius Costa Gomes 
124811a33de2SGustavo A. R. Silva 	__offload = kzalloc(struct_size(__offload, offload.entries, num_entries),
124911a33de2SGustavo A. R. Silva 			    GFP_KERNEL);
12509c66d156SVinicius Costa Gomes 	if (!__offload)
12519c66d156SVinicius Costa Gomes 		return NULL;
12529c66d156SVinicius Costa Gomes 
12539c66d156SVinicius Costa Gomes 	refcount_set(&__offload->users, 1);
12549c66d156SVinicius Costa Gomes 
12559c66d156SVinicius Costa Gomes 	return &__offload->offload;
12569c66d156SVinicius Costa Gomes }
12579c66d156SVinicius Costa Gomes 
12589c66d156SVinicius Costa Gomes struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
12599c66d156SVinicius Costa Gomes 						  *offload)
12609c66d156SVinicius Costa Gomes {
12619c66d156SVinicius Costa Gomes 	struct __tc_taprio_qopt_offload *__offload;
12629c66d156SVinicius Costa Gomes 
12639c66d156SVinicius Costa Gomes 	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
12649c66d156SVinicius Costa Gomes 				 offload);
12659c66d156SVinicius Costa Gomes 
12669c66d156SVinicius Costa Gomes 	refcount_inc(&__offload->users);
12679c66d156SVinicius Costa Gomes 
12689c66d156SVinicius Costa Gomes 	return offload;
12699c66d156SVinicius Costa Gomes }
12709c66d156SVinicius Costa Gomes EXPORT_SYMBOL_GPL(taprio_offload_get);
12719c66d156SVinicius Costa Gomes 
12729c66d156SVinicius Costa Gomes void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
12739c66d156SVinicius Costa Gomes {
12749c66d156SVinicius Costa Gomes 	struct __tc_taprio_qopt_offload *__offload;
12759c66d156SVinicius Costa Gomes 
12769c66d156SVinicius Costa Gomes 	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
12779c66d156SVinicius Costa Gomes 				 offload);
12789c66d156SVinicius Costa Gomes 
12799c66d156SVinicius Costa Gomes 	if (!refcount_dec_and_test(&__offload->users))
12809c66d156SVinicius Costa Gomes 		return;
12819c66d156SVinicius Costa Gomes 
12829c66d156SVinicius Costa Gomes 	kfree(__offload);
12839c66d156SVinicius Costa Gomes }
12849c66d156SVinicius Costa Gomes EXPORT_SYMBOL_GPL(taprio_offload_free);
12859c66d156SVinicius Costa Gomes 
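/* A minimal sketch of how a driver's TC_SETUP_QDISC_TAPRIO handler is
 * expected to use the refcounted offload object; the driver structure
 * and helpers (foo_priv, foo_program_gate_list) are hypothetical.
 * Taking a reference with taprio_offload_get() lets the driver keep
 * the configuration alive past the ndo_setup_tc() call, and
 * taprio_offload_free() drops that reference on teardown:
 *
 *	static int foo_setup_taprio(struct foo_priv *priv,
 *				    struct tc_taprio_qopt_offload *offload)
 *	{
 *		if (!offload->enable) {
 *			if (priv->taprio)
 *				taprio_offload_free(priv->taprio);
 *			priv->taprio = NULL;
 *			return 0;
 *		}
 *
 *		priv->taprio = taprio_offload_get(offload);
 *
 *		return foo_program_gate_list(priv, priv->taprio);
 *	}
 */
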
12869c66d156SVinicius Costa Gomes /* This function only serves to keep the pointers to the "oper" and "admin"
12879c66d156SVinicius Costa Gomes  * schedules valid in relation to their base times, so that when dump() is
12889c66d156SVinicius Costa Gomes  * called the user looks at the right schedules.
12899c66d156SVinicius Costa Gomes  * When using full offload, the admin configuration is promoted to oper at the
12909c66d156SVinicius Costa Gomes  * base_time in the PHC time domain.  But because the system time is not
12919c66d156SVinicius Costa Gomes  * necessarily in sync with that, we can't just trigger a hrtimer to call
12929c66d156SVinicius Costa Gomes  * switch_schedules at the right hardware time.
12939c66d156SVinicius Costa Gomes  * At the moment we call this by hand right away from taprio, but in the future
12949c66d156SVinicius Costa Gomes  * it will be useful to create a mechanism for drivers to notify taprio of the
12959c66d156SVinicius Costa Gomes  * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
12969c66d156SVinicius Costa Gomes  * This is left as TODO.
12979c66d156SVinicius Costa Gomes  */
1298d665c128SYi Wang static void taprio_offload_config_changed(struct taprio_sched *q)
12999c66d156SVinicius Costa Gomes {
13009c66d156SVinicius Costa Gomes 	struct sched_gate_list *oper, *admin;
13019c66d156SVinicius Costa Gomes 
1302c8cbe123SVladimir Oltean 	oper = rtnl_dereference(q->oper_sched);
1303c8cbe123SVladimir Oltean 	admin = rtnl_dereference(q->admin_sched);
13049c66d156SVinicius Costa Gomes 
13059c66d156SVinicius Costa Gomes 	switch_schedules(q, &admin, &oper);
13069c66d156SVinicius Costa Gomes }
13079c66d156SVinicius Costa Gomes 
130809e31cf0SVinicius Costa Gomes static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
130909e31cf0SVinicius Costa Gomes {
131009e31cf0SVinicius Costa Gomes 	u32 i, queue_mask = 0;
131109e31cf0SVinicius Costa Gomes 
131209e31cf0SVinicius Costa Gomes 	for (i = 0; i < dev->num_tc; i++) {
131309e31cf0SVinicius Costa Gomes 		u32 offset, count;
131409e31cf0SVinicius Costa Gomes 
131509e31cf0SVinicius Costa Gomes 		if (!(tc_mask & BIT(i)))
131609e31cf0SVinicius Costa Gomes 			continue;
131709e31cf0SVinicius Costa Gomes 
131809e31cf0SVinicius Costa Gomes 		offset = dev->tc_to_txq[i].offset;
131909e31cf0SVinicius Costa Gomes 		count = dev->tc_to_txq[i].count;
132009e31cf0SVinicius Costa Gomes 
132109e31cf0SVinicius Costa Gomes 		queue_mask |= GENMASK(offset + count - 1, offset);
132209e31cf0SVinicius Costa Gomes 	}
132309e31cf0SVinicius Costa Gomes 
132409e31cf0SVinicius Costa Gomes 	return queue_mask;
132509e31cf0SVinicius Costa Gomes }
132609e31cf0SVinicius Costa Gomes 
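/* Worked example for tc_map_to_queue_mask(): with two TXQs per TC
 * (tc_to_txq[0] = {.offset = 0, .count = 2}, tc_to_txq[1] =
 * {.offset = 2, .count = 2}) and tc_mask = BIT(1), only TC 1
 * contributes, yielding queue_mask = GENMASK(3, 2) = 0xc, i.e. TXQs 2
 * and 3.
 */
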
132709e31cf0SVinicius Costa Gomes static void taprio_sched_to_offload(struct net_device *dev,
13289c66d156SVinicius Costa Gomes 				    struct sched_gate_list *sched,
1329522d15eaSVladimir Oltean 				    struct tc_taprio_qopt_offload *offload,
1330522d15eaSVladimir Oltean 				    const struct tc_taprio_caps *caps)
13319c66d156SVinicius Costa Gomes {
13329c66d156SVinicius Costa Gomes 	struct sched_entry *entry;
13339c66d156SVinicius Costa Gomes 	int i = 0;
13349c66d156SVinicius Costa Gomes 
13359c66d156SVinicius Costa Gomes 	offload->base_time = sched->base_time;
13369c66d156SVinicius Costa Gomes 	offload->cycle_time = sched->cycle_time;
13379c66d156SVinicius Costa Gomes 	offload->cycle_time_extension = sched->cycle_time_extension;
13389c66d156SVinicius Costa Gomes 
13399c66d156SVinicius Costa Gomes 	list_for_each_entry(entry, &sched->entries, list) {
13409c66d156SVinicius Costa Gomes 		struct tc_taprio_sched_entry *e = &offload->entries[i];
13419c66d156SVinicius Costa Gomes 
13429c66d156SVinicius Costa Gomes 		e->command = entry->command;
13439c66d156SVinicius Costa Gomes 		e->interval = entry->interval;
1344522d15eaSVladimir Oltean 		if (caps->gate_mask_per_txq)
1345522d15eaSVladimir Oltean 			e->gate_mask = tc_map_to_queue_mask(dev,
1346522d15eaSVladimir Oltean 							    entry->gate_mask);
1347522d15eaSVladimir Oltean 		else
1348522d15eaSVladimir Oltean 			e->gate_mask = entry->gate_mask;
134909e31cf0SVinicius Costa Gomes 
13509c66d156SVinicius Costa Gomes 		i++;
13519c66d156SVinicius Costa Gomes 	}
13529c66d156SVinicius Costa Gomes 
13539c66d156SVinicius Costa Gomes 	offload->num_entries = i;
13549c66d156SVinicius Costa Gomes }
13559c66d156SVinicius Costa Gomes 
13562f530df7SVladimir Oltean static void taprio_detect_broken_mqprio(struct taprio_sched *q)
13572f530df7SVladimir Oltean {
13582f530df7SVladimir Oltean 	struct net_device *dev = qdisc_dev(q->root);
13592f530df7SVladimir Oltean 	struct tc_taprio_caps caps;
13602f530df7SVladimir Oltean 
13612f530df7SVladimir Oltean 	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
13622f530df7SVladimir Oltean 				 &caps, sizeof(caps));
13632f530df7SVladimir Oltean 
13642f530df7SVladimir Oltean 	q->broken_mqprio = caps.broken_mqprio;
13652f530df7SVladimir Oltean 	if (q->broken_mqprio)
13662f530df7SVladimir Oltean 		static_branch_inc(&taprio_have_broken_mqprio);
13672f530df7SVladimir Oltean 	else
13682f530df7SVladimir Oltean 		static_branch_inc(&taprio_have_working_mqprio);
13692f530df7SVladimir Oltean 
13702f530df7SVladimir Oltean 	q->detected_mqprio = true;
13712f530df7SVladimir Oltean }
13722f530df7SVladimir Oltean 
13732f530df7SVladimir Oltean static void taprio_cleanup_broken_mqprio(struct taprio_sched *q)
13742f530df7SVladimir Oltean {
13752f530df7SVladimir Oltean 	if (!q->detected_mqprio)
13762f530df7SVladimir Oltean 		return;
13772f530df7SVladimir Oltean 
13782f530df7SVladimir Oltean 	if (q->broken_mqprio)
13792f530df7SVladimir Oltean 		static_branch_dec(&taprio_have_broken_mqprio);
13802f530df7SVladimir Oltean 	else
13812f530df7SVladimir Oltean 		static_branch_dec(&taprio_have_working_mqprio);
13822f530df7SVladimir Oltean }
13832f530df7SVladimir Oltean 
13849c66d156SVinicius Costa Gomes static int taprio_enable_offload(struct net_device *dev,
13859c66d156SVinicius Costa Gomes 				 struct taprio_sched *q,
13869c66d156SVinicius Costa Gomes 				 struct sched_gate_list *sched,
13879c66d156SVinicius Costa Gomes 				 struct netlink_ext_ack *extack)
13889c66d156SVinicius Costa Gomes {
13899c66d156SVinicius Costa Gomes 	const struct net_device_ops *ops = dev->netdev_ops;
13909c66d156SVinicius Costa Gomes 	struct tc_taprio_qopt_offload *offload;
1391a54fc09eSVladimir Oltean 	struct tc_taprio_caps caps;
1392a54fc09eSVladimir Oltean 	int tc, err = 0;
13939c66d156SVinicius Costa Gomes 
13949c66d156SVinicius Costa Gomes 	if (!ops->ndo_setup_tc) {
13959c66d156SVinicius Costa Gomes 		NL_SET_ERR_MSG(extack,
13969c66d156SVinicius Costa Gomes 			       "Device does not support taprio offload");
13979c66d156SVinicius Costa Gomes 		return -EOPNOTSUPP;
13989c66d156SVinicius Costa Gomes 	}
13999c66d156SVinicius Costa Gomes 
1400a54fc09eSVladimir Oltean 	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
1401a54fc09eSVladimir Oltean 				 &caps, sizeof(caps));
1402a54fc09eSVladimir Oltean 
1403a54fc09eSVladimir Oltean 	if (!caps.supports_queue_max_sdu) {
1404a54fc09eSVladimir Oltean 		for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
1405a54fc09eSVladimir Oltean 			if (q->max_sdu[tc]) {
1406a54fc09eSVladimir Oltean 				NL_SET_ERR_MSG_MOD(extack,
1407a54fc09eSVladimir Oltean 						   "Device does not handle queueMaxSDU");
1408a54fc09eSVladimir Oltean 				return -EOPNOTSUPP;
1409a54fc09eSVladimir Oltean 			}
1410a54fc09eSVladimir Oltean 		}
1411a54fc09eSVladimir Oltean 	}
1412a54fc09eSVladimir Oltean 
14139c66d156SVinicius Costa Gomes 	offload = taprio_offload_alloc(sched->num_entries);
14149c66d156SVinicius Costa Gomes 	if (!offload) {
14159c66d156SVinicius Costa Gomes 		NL_SET_ERR_MSG(extack,
14169c66d156SVinicius Costa Gomes 			       "Not enough memory for enabling offload mode");
14179c66d156SVinicius Costa Gomes 		return -ENOMEM;
14189c66d156SVinicius Costa Gomes 	}
14199c66d156SVinicius Costa Gomes 	offload->enable = 1;
142009c794c0SVladimir Oltean 	mqprio_qopt_reconstruct(dev, &offload->mqprio.qopt);
1421522d15eaSVladimir Oltean 	taprio_sched_to_offload(dev, sched, offload, &caps);
14229c66d156SVinicius Costa Gomes 
1423a54fc09eSVladimir Oltean 	for (tc = 0; tc < TC_MAX_QUEUE; tc++)
1424a54fc09eSVladimir Oltean 		offload->max_sdu[tc] = q->max_sdu[tc];
1425a54fc09eSVladimir Oltean 
14269c66d156SVinicius Costa Gomes 	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
14279c66d156SVinicius Costa Gomes 	if (err < 0) {
14289c66d156SVinicius Costa Gomes 		NL_SET_ERR_MSG(extack,
14299c66d156SVinicius Costa Gomes 			       "Device failed to setup taprio offload");
14309c66d156SVinicius Costa Gomes 		goto done;
14319c66d156SVinicius Costa Gomes 	}
14329c66d156SVinicius Costa Gomes 
1433db46e3a8SVladimir Oltean 	q->offloaded = true;
1434db46e3a8SVladimir Oltean 
14359c66d156SVinicius Costa Gomes done:
14369c66d156SVinicius Costa Gomes 	taprio_offload_free(offload);
14379c66d156SVinicius Costa Gomes 
14389c66d156SVinicius Costa Gomes 	return err;
14399c66d156SVinicius Costa Gomes }
14409c66d156SVinicius Costa Gomes 
14419c66d156SVinicius Costa Gomes static int taprio_disable_offload(struct net_device *dev,
14429c66d156SVinicius Costa Gomes 				  struct taprio_sched *q,
14439c66d156SVinicius Costa Gomes 				  struct netlink_ext_ack *extack)
14449c66d156SVinicius Costa Gomes {
14459c66d156SVinicius Costa Gomes 	const struct net_device_ops *ops = dev->netdev_ops;
14469c66d156SVinicius Costa Gomes 	struct tc_taprio_qopt_offload *offload;
14479c66d156SVinicius Costa Gomes 	int err;
14489c66d156SVinicius Costa Gomes 
1449db46e3a8SVladimir Oltean 	if (!q->offloaded)
14509c66d156SVinicius Costa Gomes 		return 0;
14519c66d156SVinicius Costa Gomes 
14529c66d156SVinicius Costa Gomes 	offload = taprio_offload_alloc(0);
14539c66d156SVinicius Costa Gomes 	if (!offload) {
14549c66d156SVinicius Costa Gomes 		NL_SET_ERR_MSG(extack,
14559c66d156SVinicius Costa Gomes 			       "Not enough memory to disable offload mode");
14569c66d156SVinicius Costa Gomes 		return -ENOMEM;
14579c66d156SVinicius Costa Gomes 	}
14589c66d156SVinicius Costa Gomes 	offload->enable = 0;
14599c66d156SVinicius Costa Gomes 
14609c66d156SVinicius Costa Gomes 	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
14619c66d156SVinicius Costa Gomes 	if (err < 0) {
14629c66d156SVinicius Costa Gomes 		NL_SET_ERR_MSG(extack,
14639c66d156SVinicius Costa Gomes 			       "Device failed to disable offload");
14649c66d156SVinicius Costa Gomes 		goto out;
14659c66d156SVinicius Costa Gomes 	}
14669c66d156SVinicius Costa Gomes 
1467db46e3a8SVladimir Oltean 	q->offloaded = false;
1468db46e3a8SVladimir Oltean 
14699c66d156SVinicius Costa Gomes out:
14709c66d156SVinicius Costa Gomes 	taprio_offload_free(offload);
14719c66d156SVinicius Costa Gomes 
14729c66d156SVinicius Costa Gomes 	return err;
14739c66d156SVinicius Costa Gomes }
14749c66d156SVinicius Costa Gomes 
14759c66d156SVinicius Costa Gomes /* If full offload is enabled, the only possible clockid is the net device's
14769c66d156SVinicius Costa Gomes  * PHC. For that reason, specifying a clockid through netlink is incorrect.
14779c66d156SVinicius Costa Gomes  * For txtime-assist, it is implicitly assumed that the device's PHC is kept
14789c66d156SVinicius Costa Gomes  * in sync with the specified clockid via a user space daemon such as phc2sys.
14799c66d156SVinicius Costa Gomes  * For both software taprio and txtime-assist, the clockid is used for the
14809c66d156SVinicius Costa Gomes  * hrtimer that advances the schedule and is hence mandatory.
14819c66d156SVinicius Costa Gomes  */
14829c66d156SVinicius Costa Gomes static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
14839c66d156SVinicius Costa Gomes 				struct netlink_ext_ack *extack)
14849c66d156SVinicius Costa Gomes {
14859c66d156SVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
14869c66d156SVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
14879c66d156SVinicius Costa Gomes 	int err = -EINVAL;
14889c66d156SVinicius Costa Gomes 
14899c66d156SVinicius Costa Gomes 	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
14909c66d156SVinicius Costa Gomes 		const struct ethtool_ops *ops = dev->ethtool_ops;
14919c66d156SVinicius Costa Gomes 		struct ethtool_ts_info info = {
14929c66d156SVinicius Costa Gomes 			.cmd = ETHTOOL_GET_TS_INFO,
14939c66d156SVinicius Costa Gomes 			.phc_index = -1,
14949c66d156SVinicius Costa Gomes 		};
14959c66d156SVinicius Costa Gomes 
14969c66d156SVinicius Costa Gomes 		if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
14979c66d156SVinicius Costa Gomes 			NL_SET_ERR_MSG(extack,
14989c66d156SVinicius Costa Gomes 				       "The 'clockid' cannot be specified for full offload");
14999c66d156SVinicius Costa Gomes 			goto out;
15009c66d156SVinicius Costa Gomes 		}
15019c66d156SVinicius Costa Gomes 
15029c66d156SVinicius Costa Gomes 		if (ops && ops->get_ts_info)
15039c66d156SVinicius Costa Gomes 			err = ops->get_ts_info(dev, &info);
15049c66d156SVinicius Costa Gomes 
15059c66d156SVinicius Costa Gomes 		if (err || info.phc_index < 0) {
15069c66d156SVinicius Costa Gomes 			NL_SET_ERR_MSG(extack,
15079c66d156SVinicius Costa Gomes 				       "Device does not have a PTP clock");
15089c66d156SVinicius Costa Gomes 			err = -ENOTSUPP;
15099c66d156SVinicius Costa Gomes 			goto out;
15109c66d156SVinicius Costa Gomes 		}
15119c66d156SVinicius Costa Gomes 	} else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
15129c66d156SVinicius Costa Gomes 		int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
15136dc25401SEric Dumazet 		enum tk_offsets tk_offset;
15149c66d156SVinicius Costa Gomes 
15159c66d156SVinicius Costa Gomes 		/* We only support static clockids and we don't allow
15169c66d156SVinicius Costa Gomes 		 * the clockid to be modified after the first init.
15179c66d156SVinicius Costa Gomes 		 */
15189c66d156SVinicius Costa Gomes 		if (clockid < 0 ||
15199c66d156SVinicius Costa Gomes 		    (q->clockid != -1 && q->clockid != clockid)) {
15209c66d156SVinicius Costa Gomes 			NL_SET_ERR_MSG(extack,
15219c66d156SVinicius Costa Gomes 				       "Changing the 'clockid' of a running schedule is not supported");
15229c66d156SVinicius Costa Gomes 			err = -ENOTSUPP;
15239c66d156SVinicius Costa Gomes 			goto out;
15249c66d156SVinicius Costa Gomes 		}
15259c66d156SVinicius Costa Gomes 
15269c66d156SVinicius Costa Gomes 		switch (clockid) {
15279c66d156SVinicius Costa Gomes 		case CLOCK_REALTIME:
15286dc25401SEric Dumazet 			tk_offset = TK_OFFS_REAL;
15299c66d156SVinicius Costa Gomes 			break;
15309c66d156SVinicius Costa Gomes 		case CLOCK_MONOTONIC:
15316dc25401SEric Dumazet 			tk_offset = TK_OFFS_MAX;
15329c66d156SVinicius Costa Gomes 			break;
15339c66d156SVinicius Costa Gomes 		case CLOCK_BOOTTIME:
15346dc25401SEric Dumazet 			tk_offset = TK_OFFS_BOOT;
15359c66d156SVinicius Costa Gomes 			break;
15369c66d156SVinicius Costa Gomes 		case CLOCK_TAI:
15376dc25401SEric Dumazet 			tk_offset = TK_OFFS_TAI;
15389c66d156SVinicius Costa Gomes 			break;
15399c66d156SVinicius Costa Gomes 		default:
15409c66d156SVinicius Costa Gomes 			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
15419c66d156SVinicius Costa Gomes 			err = -EINVAL;
15429c66d156SVinicius Costa Gomes 			goto out;
15439c66d156SVinicius Costa Gomes 		}
15446dc25401SEric Dumazet 		/* This pairs with READ_ONCE() in taprio_mono_to_any */
15456dc25401SEric Dumazet 		WRITE_ONCE(q->tk_offset, tk_offset);
15469c66d156SVinicius Costa Gomes 
15479c66d156SVinicius Costa Gomes 		q->clockid = clockid;
15489c66d156SVinicius Costa Gomes 	} else {
15499c66d156SVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
15509c66d156SVinicius Costa Gomes 		goto out;
15519c66d156SVinicius Costa Gomes 	}
1552a954380aSVinicius Costa Gomes 
1553a954380aSVinicius Costa Gomes 	/* Everything went ok, return success. */
1554a954380aSVinicius Costa Gomes 	err = 0;
1555a954380aSVinicius Costa Gomes 
15569c66d156SVinicius Costa Gomes out:
15579c66d156SVinicius Costa Gomes 	return err;
15589c66d156SVinicius Costa Gomes }
15599c66d156SVinicius Costa Gomes 
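/* For reference, a software-mode schedule supplies an explicit
 * clockid, along the lines of this illustrative command (device name
 * and times are made up):
 *
 *	tc qdisc replace dev eth0 parent root taprio \
 *		num_tc 2 map 0 1 queues 1@0 1@1 \
 *		base-time 1000000000 \
 *		sched-entry S 01 300000 \
 *		sched-entry S 02 300000 \
 *		clockid CLOCK_TAI
 *
 * whereas a full-offload schedule (flags 0x2) must omit 'clockid',
 * since the device's PHC is implied.
 */
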
1560a54fc09eSVladimir Oltean static int taprio_parse_tc_entry(struct Qdisc *sch,
1561a54fc09eSVladimir Oltean 				 struct nlattr *opt,
1562a54fc09eSVladimir Oltean 				 u32 max_sdu[TC_QOPT_MAX_QUEUE],
1563a54fc09eSVladimir Oltean 				 unsigned long *seen_tcs,
1564a54fc09eSVladimir Oltean 				 struct netlink_ext_ack *extack)
1565a54fc09eSVladimir Oltean {
1566a54fc09eSVladimir Oltean 	struct nlattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { };
1567a54fc09eSVladimir Oltean 	struct net_device *dev = qdisc_dev(sch);
1568a54fc09eSVladimir Oltean 	u32 val = 0;
1569a54fc09eSVladimir Oltean 	int err, tc;
1570a54fc09eSVladimir Oltean 
1571a54fc09eSVladimir Oltean 	err = nla_parse_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, opt,
1572a54fc09eSVladimir Oltean 			       taprio_tc_policy, extack);
1573a54fc09eSVladimir Oltean 	if (err < 0)
1574a54fc09eSVladimir Oltean 		return err;
1575a54fc09eSVladimir Oltean 
1576a54fc09eSVladimir Oltean 	if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) {
1577a54fc09eSVladimir Oltean 		NL_SET_ERR_MSG_MOD(extack, "TC entry index missing");
1578a54fc09eSVladimir Oltean 		return -EINVAL;
1579a54fc09eSVladimir Oltean 	}
1580a54fc09eSVladimir Oltean 
1581a54fc09eSVladimir Oltean 	tc = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]);
1582a54fc09eSVladimir Oltean 	if (tc >= TC_QOPT_MAX_QUEUE) {
1583a54fc09eSVladimir Oltean 		NL_SET_ERR_MSG_MOD(extack, "TC entry index out of range");
1584a54fc09eSVladimir Oltean 		return -ERANGE;
1585a54fc09eSVladimir Oltean 	}
1586a54fc09eSVladimir Oltean 
1587a54fc09eSVladimir Oltean 	if (*seen_tcs & BIT(tc)) {
1588a54fc09eSVladimir Oltean 		NL_SET_ERR_MSG_MOD(extack, "Duplicate TC entry");
1589a54fc09eSVladimir Oltean 		return -EINVAL;
1590a54fc09eSVladimir Oltean 	}
1591a54fc09eSVladimir Oltean 
1592a54fc09eSVladimir Oltean 	*seen_tcs |= BIT(tc);
1593a54fc09eSVladimir Oltean 
1594a54fc09eSVladimir Oltean 	if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU])
1595a54fc09eSVladimir Oltean 		val = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]);
1596a54fc09eSVladimir Oltean 
1597a54fc09eSVladimir Oltean 	if (val > dev->max_mtu) {
1598a54fc09eSVladimir Oltean 		NL_SET_ERR_MSG_MOD(extack, "TC max SDU exceeds device max MTU");
1599a54fc09eSVladimir Oltean 		return -ERANGE;
1600a54fc09eSVladimir Oltean 	}
1601a54fc09eSVladimir Oltean 
1602a54fc09eSVladimir Oltean 	max_sdu[tc] = val;
1603a54fc09eSVladimir Oltean 
1604a54fc09eSVladimir Oltean 	return 0;
1605a54fc09eSVladimir Oltean }
1606a54fc09eSVladimir Oltean 
1607a54fc09eSVladimir Oltean static int taprio_parse_tc_entries(struct Qdisc *sch,
1608a54fc09eSVladimir Oltean 				   struct nlattr *opt,
1609a54fc09eSVladimir Oltean 				   struct netlink_ext_ack *extack)
1610a54fc09eSVladimir Oltean {
1611a54fc09eSVladimir Oltean 	struct taprio_sched *q = qdisc_priv(sch);
1612a54fc09eSVladimir Oltean 	struct net_device *dev = qdisc_dev(sch);
1613a54fc09eSVladimir Oltean 	u32 max_sdu[TC_QOPT_MAX_QUEUE];
1614a54fc09eSVladimir Oltean 	unsigned long seen_tcs = 0;
1615a54fc09eSVladimir Oltean 	struct nlattr *n;
1616a54fc09eSVladimir Oltean 	int tc, rem;
1617a54fc09eSVladimir Oltean 	int err = 0;
1618a54fc09eSVladimir Oltean 
1619a54fc09eSVladimir Oltean 	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
1620a54fc09eSVladimir Oltean 		max_sdu[tc] = q->max_sdu[tc];
1621a54fc09eSVladimir Oltean 
1622a54fc09eSVladimir Oltean 	nla_for_each_nested(n, opt, rem) {
1623a54fc09eSVladimir Oltean 		if (nla_type(n) != TCA_TAPRIO_ATTR_TC_ENTRY)
1624a54fc09eSVladimir Oltean 			continue;
1625a54fc09eSVladimir Oltean 
1626a54fc09eSVladimir Oltean 		err = taprio_parse_tc_entry(sch, n, max_sdu, &seen_tcs, extack);
1627a54fc09eSVladimir Oltean 		if (err)
1628a54fc09eSVladimir Oltean 			goto out;
1629a54fc09eSVladimir Oltean 	}
1630a54fc09eSVladimir Oltean 
1631a54fc09eSVladimir Oltean 	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
1632a54fc09eSVladimir Oltean 		q->max_sdu[tc] = max_sdu[tc];
1633a54fc09eSVladimir Oltean 		if (max_sdu[tc])
1634a54fc09eSVladimir Oltean 			q->max_frm_len[tc] = max_sdu[tc] + dev->hard_header_len;
1635a54fc09eSVladimir Oltean 		else
1636a54fc09eSVladimir Oltean 			q->max_frm_len[tc] = U32_MAX; /* never oversized */
1637a54fc09eSVladimir Oltean 	}
1638a54fc09eSVladimir Oltean 
1639a54fc09eSVladimir Oltean out:
1640a54fc09eSVladimir Oltean 	return err;
1641a54fc09eSVladimir Oltean }
1642a54fc09eSVladimir Oltean 
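/* Example of the max_frm_len derivation above: a TC entry with
 * TCA_TAPRIO_TC_ENTRY_MAX_SDU = 200 on an Ethernet device
 * (hard_header_len = ETH_HLEN = 14) gives max_frm_len = 214, so
 * frames longer than that on this TC are treated as oversized by the
 * enqueue path; a TC with max_sdu 0 keeps the U32_MAX "never
 * oversized" default.
 */
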
1643b5a0faa3SIvan Khoronzhuk static int taprio_mqprio_cmp(const struct net_device *dev,
1644b5a0faa3SIvan Khoronzhuk 			     const struct tc_mqprio_qopt *mqprio)
1645b5a0faa3SIvan Khoronzhuk {
1646b5a0faa3SIvan Khoronzhuk 	int i;
1647b5a0faa3SIvan Khoronzhuk 
1648b5a0faa3SIvan Khoronzhuk 	if (!mqprio || mqprio->num_tc != dev->num_tc)
1649b5a0faa3SIvan Khoronzhuk 		return -1;
1650b5a0faa3SIvan Khoronzhuk 
1651b5a0faa3SIvan Khoronzhuk 	for (i = 0; i < mqprio->num_tc; i++)
1652b5a0faa3SIvan Khoronzhuk 		if (dev->tc_to_txq[i].count != mqprio->count[i] ||
1653b5a0faa3SIvan Khoronzhuk 		    dev->tc_to_txq[i].offset != mqprio->offset[i])
1654b5a0faa3SIvan Khoronzhuk 			return -1;
1655b5a0faa3SIvan Khoronzhuk 
1656b5a0faa3SIvan Khoronzhuk 	for (i = 0; i <= TC_BITMASK; i++)
1657b5a0faa3SIvan Khoronzhuk 		if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
1658b5a0faa3SIvan Khoronzhuk 			return -1;
1659b5a0faa3SIvan Khoronzhuk 
1660b5a0faa3SIvan Khoronzhuk 	return 0;
1661b5a0faa3SIvan Khoronzhuk }
1662b5a0faa3SIvan Khoronzhuk 
1663a9d62274SVinicius Costa Gomes /* The semantics of the 'flags' argument in relation to 'change()'
1664a9d62274SVinicius Costa Gomes  * requests are interpreted following two rules (which are applied in
1665a9d62274SVinicius Costa Gomes  * this order): (1) an omitted 'flags' argument is interpreted as
1666a9d62274SVinicius Costa Gomes  * zero; (2) the 'flags' of a "running" taprio instance cannot be
1667a9d62274SVinicius Costa Gomes  * changed.
1668a9d62274SVinicius Costa Gomes  */
1669a9d62274SVinicius Costa Gomes static int taprio_new_flags(const struct nlattr *attr, u32 old,
1670a9d62274SVinicius Costa Gomes 			    struct netlink_ext_ack *extack)
1671a9d62274SVinicius Costa Gomes {
1672a9d62274SVinicius Costa Gomes 	u32 new = 0;
1673a9d62274SVinicius Costa Gomes 
1674a9d62274SVinicius Costa Gomes 	if (attr)
1675a9d62274SVinicius Costa Gomes 		new = nla_get_u32(attr);
1676a9d62274SVinicius Costa Gomes 
1677a9d62274SVinicius Costa Gomes 	if (old != TAPRIO_FLAGS_INVALID && old != new) {
1678a9d62274SVinicius Costa Gomes 		NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
1679a9d62274SVinicius Costa Gomes 		return -EOPNOTSUPP;
1680a9d62274SVinicius Costa Gomes 	}
1681a9d62274SVinicius Costa Gomes 
1682a9d62274SVinicius Costa Gomes 	if (!taprio_flags_valid(new)) {
1683a9d62274SVinicius Costa Gomes 		NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
1684a9d62274SVinicius Costa Gomes 		return -EINVAL;
1685a9d62274SVinicius Costa Gomes 	}
1686a9d62274SVinicius Costa Gomes 
1687a9d62274SVinicius Costa Gomes 	return new;
1688a9d62274SVinicius Costa Gomes }
1689a9d62274SVinicius Costa Gomes 
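/* Concretely, 'flags' selects the operating mode: 0x1
 * (TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) enables txtime-assist mode,
 * 0x2 (TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD) enables full offload, and 0
 * (or an omitted attribute) means pure software scheduling. Per the
 * rules above, switching e.g. from 0x0 to 0x2 on a running instance
 * is rejected with -EOPNOTSUPP.
 */
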
16905a781ccbSVinicius Costa Gomes static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
16915a781ccbSVinicius Costa Gomes 			 struct netlink_ext_ack *extack)
16925a781ccbSVinicius Costa Gomes {
16935a781ccbSVinicius Costa Gomes 	struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
1694a3d43c0dSVinicius Costa Gomes 	struct sched_gate_list *oper, *admin, *new_admin;
16955a781ccbSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
16965a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
16975a781ccbSVinicius Costa Gomes 	struct tc_mqprio_qopt *mqprio = NULL;
1698a3d43c0dSVinicius Costa Gomes 	unsigned long flags;
16995a781ccbSVinicius Costa Gomes 	ktime_t start;
17009c66d156SVinicius Costa Gomes 	int i, err;
17015a781ccbSVinicius Costa Gomes 
17028cb08174SJohannes Berg 	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
17035a781ccbSVinicius Costa Gomes 					  taprio_policy, extack);
17045a781ccbSVinicius Costa Gomes 	if (err < 0)
17055a781ccbSVinicius Costa Gomes 		return err;
17065a781ccbSVinicius Costa Gomes 
17075a781ccbSVinicius Costa Gomes 	if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
17085a781ccbSVinicius Costa Gomes 		mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
17095a781ccbSVinicius Costa Gomes 
1710a9d62274SVinicius Costa Gomes 	err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
1711a9d62274SVinicius Costa Gomes 			       q->flags, extack);
1712a9d62274SVinicius Costa Gomes 	if (err < 0)
1713a9d62274SVinicius Costa Gomes 		return err;
17144cfd5779SVedang Patel 
1715a9d62274SVinicius Costa Gomes 	q->flags = err;
17164cfd5779SVedang Patel 
1717a9d62274SVinicius Costa Gomes 	err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
17185a781ccbSVinicius Costa Gomes 	if (err < 0)
17195a781ccbSVinicius Costa Gomes 		return err;
17205a781ccbSVinicius Costa Gomes 
1721a54fc09eSVladimir Oltean 	err = taprio_parse_tc_entries(sch, opt, extack);
1722a54fc09eSVladimir Oltean 	if (err)
1723a54fc09eSVladimir Oltean 		return err;
1724a54fc09eSVladimir Oltean 
1725a3d43c0dSVinicius Costa Gomes 	new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
1726a3d43c0dSVinicius Costa Gomes 	if (!new_admin) {
1727a3d43c0dSVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
1728a3d43c0dSVinicius Costa Gomes 		return -ENOMEM;
1729a3d43c0dSVinicius Costa Gomes 	}
1730a3d43c0dSVinicius Costa Gomes 	INIT_LIST_HEAD(&new_admin->entries);
17315a781ccbSVinicius Costa Gomes 
173218cdd2f0SVladimir Oltean 	oper = rtnl_dereference(q->oper_sched);
173318cdd2f0SVladimir Oltean 	admin = rtnl_dereference(q->admin_sched);
17345a781ccbSVinicius Costa Gomes 
1735b5a0faa3SIvan Khoronzhuk 	/* no changes - no new mqprio settings */
1736b5a0faa3SIvan Khoronzhuk 	if (!taprio_mqprio_cmp(dev, mqprio))
1737b5a0faa3SIvan Khoronzhuk 		mqprio = NULL;
1738b5a0faa3SIvan Khoronzhuk 
1739a3d43c0dSVinicius Costa Gomes 	if (mqprio && (oper || admin)) {
1740a3d43c0dSVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
1741a3d43c0dSVinicius Costa Gomes 		err = -ENOTSUPP;
1742a3d43c0dSVinicius Costa Gomes 		goto free_sched;
17435a781ccbSVinicius Costa Gomes 	}
17445a781ccbSVinicius Costa Gomes 
1745b5b73b26SVinicius Costa Gomes 	err = parse_taprio_schedule(q, tb, new_admin, extack);
1746a3d43c0dSVinicius Costa Gomes 	if (err < 0)
1747a3d43c0dSVinicius Costa Gomes 		goto free_sched;
17485a781ccbSVinicius Costa Gomes 
1749a3d43c0dSVinicius Costa Gomes 	if (new_admin->num_entries == 0) {
1750a3d43c0dSVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
1751a3d43c0dSVinicius Costa Gomes 		err = -EINVAL;
1752a3d43c0dSVinicius Costa Gomes 		goto free_sched;
1753a3d43c0dSVinicius Costa Gomes 	}
17545a781ccbSVinicius Costa Gomes 
17559c66d156SVinicius Costa Gomes 	err = taprio_parse_clockid(sch, tb, extack);
17569c66d156SVinicius Costa Gomes 	if (err < 0)
1757a3d43c0dSVinicius Costa Gomes 		goto free_sched;
1758a3d43c0dSVinicius Costa Gomes 
1759a3d43c0dSVinicius Costa Gomes 	taprio_set_picos_per_byte(dev, q);
1760a3d43c0dSVinicius Costa Gomes 
17615652e63dSVinicius Costa Gomes 	if (mqprio) {
1762efe487fcSHaimin Zhang 		err = netdev_set_num_tc(dev, mqprio->num_tc);
1763efe487fcSHaimin Zhang 		if (err)
1764efe487fcSHaimin Zhang 			goto free_sched;
17652f530df7SVladimir Oltean 		for (i = 0; i < mqprio->num_tc; i++) {
17665652e63dSVinicius Costa Gomes 			netdev_set_tc_queue(dev, i,
17675652e63dSVinicius Costa Gomes 					    mqprio->count[i],
17685652e63dSVinicius Costa Gomes 					    mqprio->offset[i]);
17692f530df7SVladimir Oltean 			q->cur_txq[i] = mqprio->offset[i];
17702f530df7SVladimir Oltean 		}
17715652e63dSVinicius Costa Gomes 
17725652e63dSVinicius Costa Gomes 		/* Always use supplied priority mappings */
17735652e63dSVinicius Costa Gomes 		for (i = 0; i <= TC_BITMASK; i++)
17745652e63dSVinicius Costa Gomes 			netdev_set_prio_tc_map(dev, i,
17755652e63dSVinicius Costa Gomes 					       mqprio->prio_tc_map[i]);
17765652e63dSVinicius Costa Gomes 	}
17775652e63dSVinicius Costa Gomes 
1778a9d62274SVinicius Costa Gomes 	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
177909e31cf0SVinicius Costa Gomes 		err = taprio_enable_offload(dev, q, new_admin, extack);
17809c66d156SVinicius Costa Gomes 	else
17819c66d156SVinicius Costa Gomes 		err = taprio_disable_offload(dev, q, extack);
17829c66d156SVinicius Costa Gomes 	if (err)
17839c66d156SVinicius Costa Gomes 		goto free_sched;
17849c66d156SVinicius Costa Gomes 
1785a3d43c0dSVinicius Costa Gomes 	/* Protects against enqueue()/dequeue() */
1786a3d43c0dSVinicius Costa Gomes 	spin_lock_bh(qdisc_lock(sch));
1787a3d43c0dSVinicius Costa Gomes 
17884cfd5779SVedang Patel 	if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
17894cfd5779SVedang Patel 		if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
17904cfd5779SVedang Patel 			NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
17914cfd5779SVedang Patel 			err = -EINVAL;
17924cfd5779SVedang Patel 			goto unlock;
17934cfd5779SVedang Patel 		}
17944cfd5779SVedang Patel 
1795a5b64700SVedang Patel 		q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
17964cfd5779SVedang Patel 	}
17974cfd5779SVedang Patel 
1798a9d62274SVinicius Costa Gomes 	if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
1799a9d62274SVinicius Costa Gomes 	    !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
18004cfd5779SVedang Patel 	    !hrtimer_active(&q->advance_timer)) {
1801a3d43c0dSVinicius Costa Gomes 		hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
1802a3d43c0dSVinicius Costa Gomes 		q->advance_timer.function = advance_sched;
18035a781ccbSVinicius Costa Gomes 	}
18045a781ccbSVinicius Costa Gomes 
1805a3d43c0dSVinicius Costa Gomes 	err = taprio_get_start_time(sch, new_admin, &start);
1806a3d43c0dSVinicius Costa Gomes 	if (err < 0) {
1807a3d43c0dSVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
1808a3d43c0dSVinicius Costa Gomes 		goto unlock;
1809a3d43c0dSVinicius Costa Gomes 	}
18105a781ccbSVinicius Costa Gomes 
18114cfd5779SVedang Patel 	setup_txtime(q, new_admin, start);
18124cfd5779SVedang Patel 
1813bfabd41dSVinicius Costa Gomes 	if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
18144cfd5779SVedang Patel 		if (!oper) {
18154cfd5779SVedang Patel 			rcu_assign_pointer(q->oper_sched, new_admin);
18164cfd5779SVedang Patel 			err = 0;
18174cfd5779SVedang Patel 			new_admin = NULL;
18184cfd5779SVedang Patel 			goto unlock;
18194cfd5779SVedang Patel 		}
18204cfd5779SVedang Patel 
18214cfd5779SVedang Patel 		rcu_assign_pointer(q->admin_sched, new_admin);
18224cfd5779SVedang Patel 		if (admin)
18234cfd5779SVedang Patel 			call_rcu(&admin->rcu, taprio_free_sched_cb);
18244cfd5779SVedang Patel 	} else {
1825e5517551SVladimir Oltean 		setup_first_end_time(q, new_admin, start);
1826a3d43c0dSVinicius Costa Gomes 
1827a3d43c0dSVinicius Costa Gomes 		/* Protects against advance_sched() */
1828a3d43c0dSVinicius Costa Gomes 		spin_lock_irqsave(&q->current_entry_lock, flags);
1829a3d43c0dSVinicius Costa Gomes 
1830a3d43c0dSVinicius Costa Gomes 		taprio_start_sched(sch, start, new_admin);
1831a3d43c0dSVinicius Costa Gomes 
1832a3d43c0dSVinicius Costa Gomes 		rcu_assign_pointer(q->admin_sched, new_admin);
1833a3d43c0dSVinicius Costa Gomes 		if (admin)
1834a3d43c0dSVinicius Costa Gomes 			call_rcu(&admin->rcu, taprio_free_sched_cb);
1835a3d43c0dSVinicius Costa Gomes 
1836a3d43c0dSVinicius Costa Gomes 		spin_unlock_irqrestore(&q->current_entry_lock, flags);
18370763b3e8SIvan Khoronzhuk 
1838a9d62274SVinicius Costa Gomes 		if (FULL_OFFLOAD_IS_ENABLED(q->flags))
18390763b3e8SIvan Khoronzhuk 			taprio_offload_config_changed(q);
18404cfd5779SVedang Patel 	}
1841a3d43c0dSVinicius Costa Gomes 
18424cfd5779SVedang Patel 	new_admin = NULL;
1843a3d43c0dSVinicius Costa Gomes 	err = 0;
1844a3d43c0dSVinicius Costa Gomes 
1845a3d43c0dSVinicius Costa Gomes unlock:
1846a3d43c0dSVinicius Costa Gomes 	spin_unlock_bh(qdisc_lock(sch));
1847a3d43c0dSVinicius Costa Gomes 
1848a3d43c0dSVinicius Costa Gomes free_sched:
184951650d33SIvan Khoronzhuk 	if (new_admin)
185051650d33SIvan Khoronzhuk 		call_rcu(&new_admin->rcu, taprio_free_sched_cb);
1851a3d43c0dSVinicius Costa Gomes 
1852a3d43c0dSVinicius Costa Gomes 	return err;
18535a781ccbSVinicius Costa Gomes }
18545a781ccbSVinicius Costa Gomes 
185544d4775cSDavide Caratti static void taprio_reset(struct Qdisc *sch)
185644d4775cSDavide Caratti {
185744d4775cSDavide Caratti 	struct taprio_sched *q = qdisc_priv(sch);
185844d4775cSDavide Caratti 	struct net_device *dev = qdisc_dev(sch);
185944d4775cSDavide Caratti 	int i;
186044d4775cSDavide Caratti 
186144d4775cSDavide Caratti 	hrtimer_cancel(&q->advance_timer);
18623a415d59SEric Dumazet 
186344d4775cSDavide Caratti 	if (q->qdiscs) {
1864698285daSDavide Caratti 		for (i = 0; i < dev->num_tx_queues; i++)
1865698285daSDavide Caratti 			if (q->qdiscs[i])
186644d4775cSDavide Caratti 				qdisc_reset(q->qdiscs[i]);
186744d4775cSDavide Caratti 	}
186844d4775cSDavide Caratti }
186944d4775cSDavide Caratti 
18705a781ccbSVinicius Costa Gomes static void taprio_destroy(struct Qdisc *sch)
18715a781ccbSVinicius Costa Gomes {
18725a781ccbSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
18735a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
18749af23657SVladimir Oltean 	struct sched_gate_list *oper, *admin;
18755a781ccbSVinicius Costa Gomes 	unsigned int i;
18765a781ccbSVinicius Costa Gomes 
18777b9eba7bSLeandro Dorileo 	list_del(&q->taprio_list);
18787b9eba7bSLeandro Dorileo 
1879a56d447fSEric Dumazet 	/* Note that taprio_reset() might not be called if an error
1880a56d447fSEric Dumazet 	 * happens in qdisc_create(), after taprio_init() has been called.
1881a56d447fSEric Dumazet 	 */
1882a56d447fSEric Dumazet 	hrtimer_cancel(&q->advance_timer);
18833a415d59SEric Dumazet 	qdisc_synchronize(sch);
18845a781ccbSVinicius Costa Gomes 
18859c66d156SVinicius Costa Gomes 	taprio_disable_offload(dev, q, NULL);
18869c66d156SVinicius Costa Gomes 
18875a781ccbSVinicius Costa Gomes 	if (q->qdiscs) {
1888698285daSDavide Caratti 		for (i = 0; i < dev->num_tx_queues; i++)
18895a781ccbSVinicius Costa Gomes 			qdisc_put(q->qdiscs[i]);
18905a781ccbSVinicius Costa Gomes 
18915a781ccbSVinicius Costa Gomes 		kfree(q->qdiscs);
18925a781ccbSVinicius Costa Gomes 	}
18935a781ccbSVinicius Costa Gomes 	q->qdiscs = NULL;
18945a781ccbSVinicius Costa Gomes 
18957c16680aSVinicius Costa Gomes 	netdev_reset_tc(dev);
18965a781ccbSVinicius Costa Gomes 
18979af23657SVladimir Oltean 	oper = rtnl_dereference(q->oper_sched);
18989af23657SVladimir Oltean 	admin = rtnl_dereference(q->admin_sched);
1899a3d43c0dSVinicius Costa Gomes 
19009af23657SVladimir Oltean 	if (oper)
19019af23657SVladimir Oltean 		call_rcu(&oper->rcu, taprio_free_sched_cb);
19029af23657SVladimir Oltean 
19039af23657SVladimir Oltean 	if (admin)
19049af23657SVladimir Oltean 		call_rcu(&admin->rcu, taprio_free_sched_cb);
19052f530df7SVladimir Oltean 
19062f530df7SVladimir Oltean 	taprio_cleanup_broken_mqprio(q);
19075a781ccbSVinicius Costa Gomes }
19085a781ccbSVinicius Costa Gomes 
19095a781ccbSVinicius Costa Gomes static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
19105a781ccbSVinicius Costa Gomes 		       struct netlink_ext_ack *extack)
19115a781ccbSVinicius Costa Gomes {
19125a781ccbSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
19135a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
1914a3d43c0dSVinicius Costa Gomes 	int i;
19155a781ccbSVinicius Costa Gomes 
19165a781ccbSVinicius Costa Gomes 	spin_lock_init(&q->current_entry_lock);
19175a781ccbSVinicius Costa Gomes 
19185a781ccbSVinicius Costa Gomes 	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
1919a3d43c0dSVinicius Costa Gomes 	q->advance_timer.function = advance_sched;
19205a781ccbSVinicius Costa Gomes 
19215a781ccbSVinicius Costa Gomes 	q->root = sch;
19225a781ccbSVinicius Costa Gomes 
19235a781ccbSVinicius Costa Gomes 	/* We only support static clockids. Use an invalid value as default
19245a781ccbSVinicius Costa Gomes 	 * and get the valid one on taprio_change().
19255a781ccbSVinicius Costa Gomes 	 */
19265a781ccbSVinicius Costa Gomes 	q->clockid = -1;
1927a9d62274SVinicius Costa Gomes 	q->flags = TAPRIO_FLAGS_INVALID;
19285a781ccbSVinicius Costa Gomes 
1929efb55222SVladimir Oltean 	list_add(&q->taprio_list, &taprio_list);
1930efb55222SVladimir Oltean 
1931026de64dSVladimir Oltean 	if (sch->parent != TC_H_ROOT) {
1932026de64dSVladimir Oltean 		NL_SET_ERR_MSG_MOD(extack, "Can only be attached as root qdisc");
19335a781ccbSVinicius Costa Gomes 		return -EOPNOTSUPP;
1934026de64dSVladimir Oltean 	}
19355a781ccbSVinicius Costa Gomes 
1936026de64dSVladimir Oltean 	if (!netif_is_multiqueue(dev)) {
1937026de64dSVladimir Oltean 		NL_SET_ERR_MSG_MOD(extack, "Multi-queue device is required");
19385a781ccbSVinicius Costa Gomes 		return -EOPNOTSUPP;
1939026de64dSVladimir Oltean 	}
19405a781ccbSVinicius Costa Gomes 
19415a781ccbSVinicius Costa Gomes 	/* pre-allocate the array of child qdiscs, attachment can't fail */
19425a781ccbSVinicius Costa Gomes 	q->qdiscs = kcalloc(dev->num_tx_queues,
19435a781ccbSVinicius Costa Gomes 			    sizeof(q->qdiscs[0]),
19445a781ccbSVinicius Costa Gomes 			    GFP_KERNEL);
19455a781ccbSVinicius Costa Gomes 
19465a781ccbSVinicius Costa Gomes 	if (!q->qdiscs)
19475a781ccbSVinicius Costa Gomes 		return -ENOMEM;
19485a781ccbSVinicius Costa Gomes 
19495a781ccbSVinicius Costa Gomes 	if (!opt)
19505a781ccbSVinicius Costa Gomes 		return -EINVAL;
19515a781ccbSVinicius Costa Gomes 
1952a3d43c0dSVinicius Costa Gomes 	for (i = 0; i < dev->num_tx_queues; i++) {
1953a3d43c0dSVinicius Costa Gomes 		struct netdev_queue *dev_queue;
1954a3d43c0dSVinicius Costa Gomes 		struct Qdisc *qdisc;
1955a3d43c0dSVinicius Costa Gomes 
1956a3d43c0dSVinicius Costa Gomes 		dev_queue = netdev_get_tx_queue(dev, i);
1957a3d43c0dSVinicius Costa Gomes 		qdisc = qdisc_create_dflt(dev_queue,
1958a3d43c0dSVinicius Costa Gomes 					  &pfifo_qdisc_ops,
1959a3d43c0dSVinicius Costa Gomes 					  TC_H_MAKE(TC_H_MAJ(sch->handle),
1960a3d43c0dSVinicius Costa Gomes 						    TC_H_MIN(i + 1)),
1961a3d43c0dSVinicius Costa Gomes 					  extack);
1962a3d43c0dSVinicius Costa Gomes 		if (!qdisc)
1963a3d43c0dSVinicius Costa Gomes 			return -ENOMEM;
1964a3d43c0dSVinicius Costa Gomes 
1965a3d43c0dSVinicius Costa Gomes 		if (i < dev->real_num_tx_queues)
1966a3d43c0dSVinicius Costa Gomes 			qdisc_hash_add(qdisc, false);
1967a3d43c0dSVinicius Costa Gomes 
1968a3d43c0dSVinicius Costa Gomes 		q->qdiscs[i] = qdisc;
1969a3d43c0dSVinicius Costa Gomes 	}
1970a3d43c0dSVinicius Costa Gomes 
19712f530df7SVladimir Oltean 	taprio_detect_broken_mqprio(q);
19722f530df7SVladimir Oltean 
19735a781ccbSVinicius Costa Gomes 	return taprio_change(sch, opt, extack);
19745a781ccbSVinicius Costa Gomes }
19755a781ccbSVinicius Costa Gomes 
197613511704SYannick Vignon static void taprio_attach(struct Qdisc *sch)
197713511704SYannick Vignon {
197813511704SYannick Vignon 	struct taprio_sched *q = qdisc_priv(sch);
197913511704SYannick Vignon 	struct net_device *dev = qdisc_dev(sch);
198013511704SYannick Vignon 	unsigned int ntx;
198113511704SYannick Vignon 
198213511704SYannick Vignon 	/* Attach underlying qdisc */
198313511704SYannick Vignon 	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
198413511704SYannick Vignon 		struct Qdisc *qdisc = q->qdiscs[ntx];
198513511704SYannick Vignon 		struct Qdisc *old;
198613511704SYannick Vignon 
198713511704SYannick Vignon 		if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
198813511704SYannick Vignon 			qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
198913511704SYannick Vignon 			old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
199013511704SYannick Vignon 		} else {
199113511704SYannick Vignon 			old = dev_graft_qdisc(qdisc->dev_queue, sch);
199213511704SYannick Vignon 			qdisc_refcount_inc(sch);
199313511704SYannick Vignon 		}
199413511704SYannick Vignon 		if (old)
199513511704SYannick Vignon 			qdisc_put(old);
199613511704SYannick Vignon 	}
199713511704SYannick Vignon 
199813511704SYannick Vignon 	/* access to the child qdiscs is not needed in offload mode */
199913511704SYannick Vignon 	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
200013511704SYannick Vignon 		kfree(q->qdiscs);
200113511704SYannick Vignon 		q->qdiscs = NULL;
200213511704SYannick Vignon 	}
200313511704SYannick Vignon }
200413511704SYannick Vignon 
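/* Class IDs are 1-based: class 'cl' corresponds to TX queue 'cl - 1'.
 * Returns NULL when the class is out of range for the device.
 */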
20055a781ccbSVinicius Costa Gomes static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
20065a781ccbSVinicius Costa Gomes 					     unsigned long cl)
20075a781ccbSVinicius Costa Gomes {
20085a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
20095a781ccbSVinicius Costa Gomes 	unsigned long ntx = cl - 1;
20105a781ccbSVinicius Costa Gomes 
20115a781ccbSVinicius Costa Gomes 	if (ntx >= dev->num_tx_queues)
20125a781ccbSVinicius Costa Gomes 		return NULL;
20135a781ccbSVinicius Costa Gomes 
20145a781ccbSVinicius Costa Gomes 	return netdev_get_tx_queue(dev, ntx);
20155a781ccbSVinicius Costa Gomes }
20165a781ccbSVinicius Costa Gomes 
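/* Replace the child qdisc of one TX queue. The device is deactivated
 * around the swap so the data path never observes a half-grafted qdisc.
 * In full offload mode the new qdisc is grafted straight onto the
 * queue; otherwise only the q->qdiscs[] shadow array is updated, since
 * the queues themselves point at taprio (see taprio_attach()).
 */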
20175a781ccbSVinicius Costa Gomes static int taprio_graft(struct Qdisc *sch, unsigned long cl,
20185a781ccbSVinicius Costa Gomes 			struct Qdisc *new, struct Qdisc **old,
20195a781ccbSVinicius Costa Gomes 			struct netlink_ext_ack *extack)
20205a781ccbSVinicius Costa Gomes {
20215a781ccbSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
20225a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
20235a781ccbSVinicius Costa Gomes 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
20245a781ccbSVinicius Costa Gomes 
20255a781ccbSVinicius Costa Gomes 	if (!dev_queue)
20265a781ccbSVinicius Costa Gomes 		return -EINVAL;
20275a781ccbSVinicius Costa Gomes 
20285a781ccbSVinicius Costa Gomes 	if (dev->flags & IFF_UP)
20295a781ccbSVinicius Costa Gomes 		dev_deactivate(dev);
20305a781ccbSVinicius Costa Gomes 
203113511704SYannick Vignon 	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
203213511704SYannick Vignon 		*old = dev_graft_qdisc(dev_queue, new);
203313511704SYannick Vignon 	} else {
20345a781ccbSVinicius Costa Gomes 		*old = q->qdiscs[cl - 1];
20355a781ccbSVinicius Costa Gomes 		q->qdiscs[cl - 1] = new;
203613511704SYannick Vignon 	}
20375a781ccbSVinicius Costa Gomes 
20385a781ccbSVinicius Costa Gomes 	if (new)
20395a781ccbSVinicius Costa Gomes 		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
20405a781ccbSVinicius Costa Gomes 
20415a781ccbSVinicius Costa Gomes 	if (dev->flags & IFF_UP)
20425a781ccbSVinicius Costa Gomes 		dev_activate(dev);
20435a781ccbSVinicius Costa Gomes 
20445a781ccbSVinicius Costa Gomes 	return 0;
20455a781ccbSVinicius Costa Gomes }
20465a781ccbSVinicius Costa Gomes 
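/* Serialize one schedule entry as a nested TCA_TAPRIO_SCHED_ENTRY
 * attribute carrying its index, command, gate mask and interval.
 */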
20475a781ccbSVinicius Costa Gomes static int dump_entry(struct sk_buff *msg,
20485a781ccbSVinicius Costa Gomes 		      const struct sched_entry *entry)
20495a781ccbSVinicius Costa Gomes {
20505a781ccbSVinicius Costa Gomes 	struct nlattr *item;
20515a781ccbSVinicius Costa Gomes 
2052ae0be8deSMichal Kubecek 	item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
20535a781ccbSVinicius Costa Gomes 	if (!item)
20545a781ccbSVinicius Costa Gomes 		return -ENOSPC;
20555a781ccbSVinicius Costa Gomes 
20565a781ccbSVinicius Costa Gomes 	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
20575a781ccbSVinicius Costa Gomes 		goto nla_put_failure;
20585a781ccbSVinicius Costa Gomes 
20595a781ccbSVinicius Costa Gomes 	if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
20605a781ccbSVinicius Costa Gomes 		goto nla_put_failure;
20615a781ccbSVinicius Costa Gomes 
20625a781ccbSVinicius Costa Gomes 	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
20635a781ccbSVinicius Costa Gomes 			entry->gate_mask))
20645a781ccbSVinicius Costa Gomes 		goto nla_put_failure;
20655a781ccbSVinicius Costa Gomes 
20665a781ccbSVinicius Costa Gomes 	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
20675a781ccbSVinicius Costa Gomes 			entry->interval))
20685a781ccbSVinicius Costa Gomes 		goto nla_put_failure;
20695a781ccbSVinicius Costa Gomes 
20705a781ccbSVinicius Costa Gomes 	return nla_nest_end(msg, item);
20715a781ccbSVinicius Costa Gomes 
20725a781ccbSVinicius Costa Gomes nla_put_failure:
20735a781ccbSVinicius Costa Gomes 	nla_nest_cancel(msg, item);
20745a781ccbSVinicius Costa Gomes 	return -1;
20755a781ccbSVinicius Costa Gomes }
20765a781ccbSVinicius Costa Gomes 
2077a3d43c0dSVinicius Costa Gomes static int dump_schedule(struct sk_buff *msg,
2078a3d43c0dSVinicius Costa Gomes 			 const struct sched_gate_list *root)
2079a3d43c0dSVinicius Costa Gomes {
2080a3d43c0dSVinicius Costa Gomes 	struct nlattr *entry_list;
2081a3d43c0dSVinicius Costa Gomes 	struct sched_entry *entry;
2082a3d43c0dSVinicius Costa Gomes 
2083a3d43c0dSVinicius Costa Gomes 	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
2084a3d43c0dSVinicius Costa Gomes 			root->base_time, TCA_TAPRIO_PAD))
2085a3d43c0dSVinicius Costa Gomes 		return -1;
2086a3d43c0dSVinicius Costa Gomes 
20876ca6a665SVinicius Costa Gomes 	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
20886ca6a665SVinicius Costa Gomes 			root->cycle_time, TCA_TAPRIO_PAD))
20896ca6a665SVinicius Costa Gomes 		return -1;
20906ca6a665SVinicius Costa Gomes 
2091c25031e9SVinicius Costa Gomes 	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
2092c25031e9SVinicius Costa Gomes 			root->cycle_time_extension, TCA_TAPRIO_PAD))
2093c25031e9SVinicius Costa Gomes 		return -1;
2094c25031e9SVinicius Costa Gomes 
2095a3d43c0dSVinicius Costa Gomes 	entry_list = nla_nest_start_noflag(msg,
2096a3d43c0dSVinicius Costa Gomes 					   TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
2097a3d43c0dSVinicius Costa Gomes 	if (!entry_list)
2098a3d43c0dSVinicius Costa Gomes 		goto error_nest;
2099a3d43c0dSVinicius Costa Gomes 
2100a3d43c0dSVinicius Costa Gomes 	list_for_each_entry(entry, &root->entries, list) {
2101a3d43c0dSVinicius Costa Gomes 		if (dump_entry(msg, entry) < 0)
2102a3d43c0dSVinicius Costa Gomes 			goto error_nest;
2103a3d43c0dSVinicius Costa Gomes 	}
2104a3d43c0dSVinicius Costa Gomes 
2105a3d43c0dSVinicius Costa Gomes 	nla_nest_end(msg, entry_list);
2106a3d43c0dSVinicius Costa Gomes 	return 0;
2107a3d43c0dSVinicius Costa Gomes 
2108a3d43c0dSVinicius Costa Gomes error_nest:
2109a3d43c0dSVinicius Costa Gomes 	nla_nest_cancel(msg, entry_list);
2110a3d43c0dSVinicius Costa Gomes 	return -1;
2111a3d43c0dSVinicius Costa Gomes }
2112a3d43c0dSVinicius Costa Gomes 
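/* Dump the per-traffic-class max-SDU table, one nested
 * TCA_TAPRIO_ATTR_TC_ENTRY per traffic class.
 */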
2113a54fc09eSVladimir Oltean static int taprio_dump_tc_entries(struct taprio_sched *q, struct sk_buff *skb)
2114a54fc09eSVladimir Oltean {
2115a54fc09eSVladimir Oltean 	struct nlattr *n;
2116a54fc09eSVladimir Oltean 	int tc;
2117a54fc09eSVladimir Oltean 
2118a54fc09eSVladimir Oltean 	for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
2119a54fc09eSVladimir Oltean 		n = nla_nest_start(skb, TCA_TAPRIO_ATTR_TC_ENTRY);
2120a54fc09eSVladimir Oltean 		if (!n)
2121a54fc09eSVladimir Oltean 			return -EMSGSIZE;
2122a54fc09eSVladimir Oltean 
2123a54fc09eSVladimir Oltean 		if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_INDEX, tc))
2124a54fc09eSVladimir Oltean 			goto nla_put_failure;
2125a54fc09eSVladimir Oltean 
2126a54fc09eSVladimir Oltean 		if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
2127a54fc09eSVladimir Oltean 				q->max_sdu[tc]))
2128a54fc09eSVladimir Oltean 			goto nla_put_failure;
2129a54fc09eSVladimir Oltean 
2130a54fc09eSVladimir Oltean 		nla_nest_end(skb, n);
2131a54fc09eSVladimir Oltean 	}
2132a54fc09eSVladimir Oltean 
2133a54fc09eSVladimir Oltean 	return 0;
2134a54fc09eSVladimir Oltean 
2135a54fc09eSVladimir Oltean nla_put_failure:
2136a54fc09eSVladimir Oltean 	nla_nest_cancel(skb, n);
2137a54fc09eSVladimir Oltean 	return -EMSGSIZE;
2138a54fc09eSVladimir Oltean }
2139a54fc09eSVladimir Oltean 
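/* Dump the full qdisc state: priomap, clockid (not dumped under full
 * offload), flags, txtime delay, the per-tc entries, the operational
 * schedule and, when one is pending, the admin schedule. This is what
 * "tc qdisc show dev <dev>" renders. As a rough sketch (example values
 * only; see tc-taprio(8) for the authoritative syntax), a schedule
 * dumped here could have been installed with:
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *       num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *       queues 1@0 1@1 2@2 \
 *       base-time 1528743495910289987 \
 *       sched-entry S 01 300000 \
 *       sched-entry S 02 300000 \
 *       sched-entry S 04 400000 \
 *       clockid CLOCK_TAI
 */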
21405a781ccbSVinicius Costa Gomes static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
21415a781ccbSVinicius Costa Gomes {
21425a781ccbSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
21435a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
2144a3d43c0dSVinicius Costa Gomes 	struct sched_gate_list *oper, *admin;
21455a781ccbSVinicius Costa Gomes 	struct tc_mqprio_qopt opt = { 0 };
2146a3d43c0dSVinicius Costa Gomes 	struct nlattr *nest, *sched_nest;
21475a781ccbSVinicius Costa Gomes 
214818cdd2f0SVladimir Oltean 	oper = rtnl_dereference(q->oper_sched);
214918cdd2f0SVladimir Oltean 	admin = rtnl_dereference(q->admin_sched);
2150a3d43c0dSVinicius Costa Gomes 
21519dd6ad67SVladimir Oltean 	mqprio_qopt_reconstruct(dev, &opt);
21525a781ccbSVinicius Costa Gomes 
2153ae0be8deSMichal Kubecek 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
21545a781ccbSVinicius Costa Gomes 	if (!nest)
2155a3d43c0dSVinicius Costa Gomes 		goto start_error;
21565a781ccbSVinicius Costa Gomes 
21575a781ccbSVinicius Costa Gomes 	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
21585a781ccbSVinicius Costa Gomes 		goto options_error;
21595a781ccbSVinicius Costa Gomes 
21609c66d156SVinicius Costa Gomes 	if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
21619c66d156SVinicius Costa Gomes 	    nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
21625a781ccbSVinicius Costa Gomes 		goto options_error;
21635a781ccbSVinicius Costa Gomes 
21644cfd5779SVedang Patel 	if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
21654cfd5779SVedang Patel 		goto options_error;
21664cfd5779SVedang Patel 
21674cfd5779SVedang Patel 	if (q->txtime_delay &&
2168a5b64700SVedang Patel 	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
21694cfd5779SVedang Patel 		goto options_error;
21704cfd5779SVedang Patel 
2171a54fc09eSVladimir Oltean 	if (taprio_dump_tc_entries(q, skb))
2172a54fc09eSVladimir Oltean 		goto options_error;
2173a54fc09eSVladimir Oltean 
2174a3d43c0dSVinicius Costa Gomes 	if (oper && dump_schedule(skb, oper))
21755a781ccbSVinicius Costa Gomes 		goto options_error;
21765a781ccbSVinicius Costa Gomes 
2177a3d43c0dSVinicius Costa Gomes 	if (!admin)
2178a3d43c0dSVinicius Costa Gomes 		goto done;
21795a781ccbSVinicius Costa Gomes 
2180a3d43c0dSVinicius Costa Gomes 	sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
2181e4acf427SColin Ian King 	if (!sched_nest)
2182e4acf427SColin Ian King 		goto options_error;
2183a3d43c0dSVinicius Costa Gomes 
2184a3d43c0dSVinicius Costa Gomes 	if (dump_schedule(skb, admin))
2185a3d43c0dSVinicius Costa Gomes 		goto admin_error;
2186a3d43c0dSVinicius Costa Gomes 
2187a3d43c0dSVinicius Costa Gomes 	nla_nest_end(skb, sched_nest);
2188a3d43c0dSVinicius Costa Gomes 
2189a3d43c0dSVinicius Costa Gomes done:
21905a781ccbSVinicius Costa Gomes 	return nla_nest_end(skb, nest);
21915a781ccbSVinicius Costa Gomes 
2192a3d43c0dSVinicius Costa Gomes admin_error:
2193a3d43c0dSVinicius Costa Gomes 	nla_nest_cancel(skb, sched_nest);
2194a3d43c0dSVinicius Costa Gomes 
21955a781ccbSVinicius Costa Gomes options_error:
21965a781ccbSVinicius Costa Gomes 	nla_nest_cancel(skb, nest);
2197a3d43c0dSVinicius Costa Gomes 
2198a3d43c0dSVinicius Costa Gomes start_error:
2199a3d43c0dSVinicius Costa Gomes 	return -ENOSPC;
22005a781ccbSVinicius Costa Gomes }
22015a781ccbSVinicius Costa Gomes 
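/* The leaf reported for a class is the queue's qdisc_sleeping pointer:
 * in software mode that is taprio itself (grafted onto every queue by
 * taprio_attach()), in full offload mode it is the per-queue child.
 */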
22025a781ccbSVinicius Costa Gomes static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
22035a781ccbSVinicius Costa Gomes {
2204af7b29b1SVladimir Oltean 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
22055a781ccbSVinicius Costa Gomes 
2206af7b29b1SVladimir Oltean 	if (!dev_queue)
22075a781ccbSVinicius Costa Gomes 		return NULL;
22085a781ccbSVinicius Costa Gomes 
2209af7b29b1SVladimir Oltean 	return dev_queue->qdisc_sleeping;
22105a781ccbSVinicius Costa Gomes }
22115a781ccbSVinicius Costa Gomes 
22125a781ccbSVinicius Costa Gomes static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
22135a781ccbSVinicius Costa Gomes {
22145a781ccbSVinicius Costa Gomes 	unsigned int ntx = TC_H_MIN(classid);
22155a781ccbSVinicius Costa Gomes 
22165a781ccbSVinicius Costa Gomes 	if (!taprio_queue_get(sch, ntx))
22175a781ccbSVinicius Costa Gomes 		return 0;
22185a781ccbSVinicius Costa Gomes 	return ntx;
22195a781ccbSVinicius Costa Gomes }
22205a781ccbSVinicius Costa Gomes 
22215a781ccbSVinicius Costa Gomes static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
22225a781ccbSVinicius Costa Gomes 			     struct sk_buff *skb, struct tcmsg *tcm)
22235a781ccbSVinicius Costa Gomes {
22245a781ccbSVinicius Costa Gomes 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
22255a781ccbSVinicius Costa Gomes 
22265a781ccbSVinicius Costa Gomes 	tcm->tcm_parent = TC_H_ROOT;
22275a781ccbSVinicius Costa Gomes 	tcm->tcm_handle |= TC_H_MIN(cl);
22285a781ccbSVinicius Costa Gomes 	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
22295a781ccbSVinicius Costa Gomes 
22305a781ccbSVinicius Costa Gomes 	return 0;
22315a781ccbSVinicius Costa Gomes }
22325a781ccbSVinicius Costa Gomes 
22335a781ccbSVinicius Costa Gomes static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
22345a781ccbSVinicius Costa Gomes 				   struct gnet_dump *d)
22355a781ccbSVinicius Costa Gomes 	__releases(d->lock)
22365a781ccbSVinicius Costa Gomes 	__acquires(d->lock)
22375a781ccbSVinicius Costa Gomes {
22385a781ccbSVinicius Costa Gomes 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
22395a781ccbSVinicius Costa Gomes 
22405a781ccbSVinicius Costa Gomes 	sch = dev_queue->qdisc_sleeping;
224129cbcd85SAhmed S. Darwish 	if (gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 ||
22425dd431b6SPaolo Abeni 	    qdisc_qstats_copy(d, sch) < 0)
22435a781ccbSVinicius Costa Gomes 		return -1;
22445a781ccbSVinicius Costa Gomes 	return 0;
22455a781ccbSVinicius Costa Gomes }
22465a781ccbSVinicius Costa Gomes 
22475a781ccbSVinicius Costa Gomes static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
22485a781ccbSVinicius Costa Gomes {
22495a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
22505a781ccbSVinicius Costa Gomes 	unsigned long ntx;
22515a781ccbSVinicius Costa Gomes 
22525a781ccbSVinicius Costa Gomes 	if (arg->stop)
22535a781ccbSVinicius Costa Gomes 		return;
22545a781ccbSVinicius Costa Gomes 
22555a781ccbSVinicius Costa Gomes 	arg->count = arg->skip;
22565a781ccbSVinicius Costa Gomes 	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
2257e046fa89SZhengchao Shao 		if (!tc_qdisc_stats_dump(sch, ntx + 1, arg))
22585a781ccbSVinicius Costa Gomes 			break;
22595a781ccbSVinicius Costa Gomes 	}
22605a781ccbSVinicius Costa Gomes }
22615a781ccbSVinicius Costa Gomes 
22625a781ccbSVinicius Costa Gomes static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
22635a781ccbSVinicius Costa Gomes 						struct tcmsg *tcm)
22645a781ccbSVinicius Costa Gomes {
22655a781ccbSVinicius Costa Gomes 	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
22665a781ccbSVinicius Costa Gomes }
22675a781ccbSVinicius Costa Gomes 
22685a781ccbSVinicius Costa Gomes static const struct Qdisc_class_ops taprio_class_ops = {
22695a781ccbSVinicius Costa Gomes 	.graft		= taprio_graft,
22705a781ccbSVinicius Costa Gomes 	.leaf		= taprio_leaf,
22715a781ccbSVinicius Costa Gomes 	.find		= taprio_find,
22725a781ccbSVinicius Costa Gomes 	.walk		= taprio_walk,
22735a781ccbSVinicius Costa Gomes 	.dump		= taprio_dump_class,
22745a781ccbSVinicius Costa Gomes 	.dump_stats	= taprio_dump_class_stats,
22755a781ccbSVinicius Costa Gomes 	.select_queue	= taprio_select_queue,
22765a781ccbSVinicius Costa Gomes };
22775a781ccbSVinicius Costa Gomes 
22785a781ccbSVinicius Costa Gomes static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
22795a781ccbSVinicius Costa Gomes 	.cl_ops		= &taprio_class_ops,
22805a781ccbSVinicius Costa Gomes 	.id		= "taprio",
22815a781ccbSVinicius Costa Gomes 	.priv_size	= sizeof(struct taprio_sched),
22825a781ccbSVinicius Costa Gomes 	.init		= taprio_init,
2283a3d43c0dSVinicius Costa Gomes 	.change		= taprio_change,
22845a781ccbSVinicius Costa Gomes 	.destroy	= taprio_destroy,
228544d4775cSDavide Caratti 	.reset		= taprio_reset,
228613511704SYannick Vignon 	.attach		= taprio_attach,
22875a781ccbSVinicius Costa Gomes 	.peek		= taprio_peek,
22885a781ccbSVinicius Costa Gomes 	.dequeue	= taprio_dequeue,
22895a781ccbSVinicius Costa Gomes 	.enqueue	= taprio_enqueue,
22905a781ccbSVinicius Costa Gomes 	.dump		= taprio_dump,
22915a781ccbSVinicius Costa Gomes 	.owner		= THIS_MODULE,
22925a781ccbSVinicius Costa Gomes };
22935a781ccbSVinicius Costa Gomes 
22947b9eba7bSLeandro Dorileo static struct notifier_block taprio_device_notifier = {
22957b9eba7bSLeandro Dorileo 	.notifier_call = taprio_dev_notifier,
22967b9eba7bSLeandro Dorileo };
22977b9eba7bSLeandro Dorileo 
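/* The netdevice notifier is registered before the qdisc ops (and torn
 * down in the reverse order) so a taprio instance can never exist
 * without it; elsewhere in this file it lets taprio react to device
 * events such as link-speed changes.
 */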
22985a781ccbSVinicius Costa Gomes static int __init taprio_module_init(void)
22995a781ccbSVinicius Costa Gomes {
23007b9eba7bSLeandro Dorileo 	int err = register_netdevice_notifier(&taprio_device_notifier);
23017b9eba7bSLeandro Dorileo 
23027b9eba7bSLeandro Dorileo 	if (err)
23037b9eba7bSLeandro Dorileo 		return err;
23047b9eba7bSLeandro Dorileo 
23055a781ccbSVinicius Costa Gomes 	return register_qdisc(&taprio_qdisc_ops);
23065a781ccbSVinicius Costa Gomes }
23075a781ccbSVinicius Costa Gomes 
23085a781ccbSVinicius Costa Gomes static void __exit taprio_module_exit(void)
23095a781ccbSVinicius Costa Gomes {
23105a781ccbSVinicius Costa Gomes 	unregister_qdisc(&taprio_qdisc_ops);
23117b9eba7bSLeandro Dorileo 	unregister_netdevice_notifier(&taprio_device_notifier);
23125a781ccbSVinicius Costa Gomes }
23135a781ccbSVinicius Costa Gomes 
23145a781ccbSVinicius Costa Gomes module_init(taprio_module_init);
23155a781ccbSVinicius Costa Gomes module_exit(taprio_module_exit);
23165a781ccbSVinicius Costa Gomes MODULE_LICENSE("GPL");