// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	 Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 *
 */
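/* Example configuration via tc-taprio(8); device name and values are
 * illustrative only:
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *         num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *         queues 1@0 1@1 2@2 \
 *         base-time 1528743495910289987 \
 *         sched-entry S 01 300000 \
 *         sched-entry S 02 300000 \
 *         sched-entry S 04 400000 \
 *         clockid CLOCK_TAI
 */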

#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/time.h>
#include <net/gso.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/tcp.h>

#define TAPRIO_STAT_NOT_SET	(~0ULL)

#include "sch_mqprio_lib.h"

static LIST_HEAD(taprio_list);
static struct static_key_false taprio_have_broken_mqprio;
static struct static_key_false taprio_have_working_mqprio;

#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX

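/* One entry of the Gate Control List (GCL): gate_mask selects which traffic
 * class gates are open while this entry is active, for "interval" ns.
 */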
struct sched_entry {
	/* Durations between this GCL entry and the GCL entry where the
	 * respective traffic class gate closes
	 */
	u64 gate_duration[TC_MAX_QUEUE];
	atomic_t budget[TC_MAX_QUEUE];
	/* The qdisc makes some effort so that no packet leaves
	 * after this time
	 */
	ktime_t gate_close_time[TC_MAX_QUEUE];
	struct list_head list;
	/* Used to calculate when to advance the schedule */
	ktime_t end_time;
	ktime_t next_txtime;
	int index;
	u32 gate_mask;
	u32 interval;
	u8 command;
};

struct sched_gate_list {
	/* Longest non-zero contiguous gate durations per traffic class,
	 * or 0 if a traffic class gate never opens during the schedule.
	 */
	u64 max_open_gate_duration[TC_MAX_QUEUE];
	u32 max_frm_len[TC_MAX_QUEUE]; /* for the fast path */
	u32 max_sdu[TC_MAX_QUEUE]; /* for dump */
	struct rcu_head rcu;
	struct list_head entries;
	size_t num_entries;
	ktime_t cycle_end_time;
	s64 cycle_time;
	s64 cycle_time_extension;
	s64 base_time;
};

struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	u32 flags;
	enum tk_offsets tk_offset;
	int clockid;
	bool offloaded;
	bool detected_mqprio;
	bool broken_mqprio;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds it's sub-nanoseconds per byte
				    */

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct sched_gate_list __rcu *oper_sched;
	struct sched_gate_list __rcu *admin_sched;
	struct hrtimer advance_timer;
	struct list_head taprio_list;
	int cur_txq[TC_MAX_QUEUE];
	u32 max_sdu[TC_MAX_QUEUE]; /* save info from the user */
	u32 fp[TC_QOPT_MAX_QUEUE]; /* only for dump and offloading */
	u32 txtime_delay;
};

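/* Reference-counted wrapper around the offload descriptor shared with
 * drivers, so the descriptor can outlive the qdisc that created it.
 */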
struct __tc_taprio_qopt_offload {
	refcount_t users;
	struct tc_taprio_qopt_offload offload;
};

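/* Walk the (circular) GCL once per entry and accumulate, for every traffic
 * class, how long its gate stays open counting from that entry; also record
 * the per-TC maximum in sched->max_open_gate_duration[].
 */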
static void taprio_calculate_gate_durations(struct taprio_sched *q,
					    struct sched_gate_list *sched)
{
	struct net_device *dev = qdisc_dev(q->root);
	int num_tc = netdev_get_num_tc(dev);
	struct sched_entry *entry, *cur;
	int tc;

	list_for_each_entry(entry, &sched->entries, list) {
		u32 gates_still_open = entry->gate_mask;

		/* For each traffic class, calculate each open gate duration,
		 * starting at this schedule entry and ending at the schedule
		 * entry containing a gate close event for that TC.
		 */
		cur = entry;

		do {
			if (!gates_still_open)
				break;

			for (tc = 0; tc < num_tc; tc++) {
				if (!(gates_still_open & BIT(tc)))
					continue;

				if (cur->gate_mask & BIT(tc))
					entry->gate_duration[tc] += cur->interval;
				else
					gates_still_open &= ~BIT(tc);
			}

			cur = list_next_entry_circular(cur, &sched->entries, list);
		} while (cur != entry);

		/* Keep track of the maximum gate duration for each traffic
		 * class, taking care to not confuse a traffic class which is
		 * temporarily closed with one that is always closed.
		 */
		for (tc = 0; tc < num_tc; tc++)
			if (entry->gate_duration[tc] &&
			    sched->max_open_gate_duration[tc] < entry->gate_duration[tc])
				sched->max_open_gate_duration[tc] = entry->gate_duration[tc];
	}
}

static bool taprio_entry_allows_tx(ktime_t skb_end_time,
				   struct sched_entry *entry, int tc)
{
	return ktime_before(skb_end_time, entry->gate_close_time[tc]);
}

static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
	if (!sched)
		return KTIME_MAX;

	return ns_to_ktime(sched->base_time);
}

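/* Convert a CLOCK_MONOTONIC timestamp to the qdisc's clock; TK_OFFS_MAX is
 * used as a sentinel meaning that no conversion is needed.
 */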
static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)
{
	/* This pairs with WRITE_ONCE() in taprio_parse_clockid() */
	enum tk_offsets tk_offset = READ_ONCE(q->tk_offset);

	switch (tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, tk_offset);
	}
}

static ktime_t taprio_get_time(const struct taprio_sched *q)
{
	return taprio_mono_to_any(q, ktime_get());
}

static void taprio_free_sched_cb(struct rcu_head *head)
{
	struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
	struct sched_entry *entry, *n;

	list_for_each_entry_safe(entry, n, &sched->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	kfree(sched);
}

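/* Promote the admin schedule to operational; the old operational schedule,
 * if any, is freed after an RCU grace period.
 */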
static void switch_schedules(struct taprio_sched *q,
			     struct sched_gate_list **admin,
			     struct sched_gate_list **oper)
{
	rcu_assign_pointer(q->oper_sched, *admin);
	rcu_assign_pointer(q->admin_sched, NULL);

	if (*oper)
		call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

	*oper = *admin;
	*admin = NULL;
}

/* Get how much time has already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
	ktime_t time_since_sched_start;
	s32 time_elapsed;

	time_since_sched_start = ktime_sub(time, sched->base_time);
	div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

	return time_elapsed;
}

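/* An interval ends at the earliest of: its own end (start + interval), the
 * end of the current cycle, or the admin schedule's base_time when that
 * falls within the cycle_time_extension window past the cycle end.
 */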
static ktime_t get_interval_end_time(struct sched_gate_list *sched,
				     struct sched_gate_list *admin,
				     struct sched_entry *entry,
				     ktime_t intv_start)
{
	s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
	ktime_t intv_end, cycle_ext_end, cycle_end;

	cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
	intv_end = ktime_add_ns(intv_start, entry->interval);
	cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

	if (ktime_before(intv_end, cycle_end))
		return intv_end;
	else if (admin && admin != sched &&
		 ktime_after(admin->base_time, cycle_end) &&
		 ktime_before(admin->base_time, cycle_ext_end))
		return admin->base_time;
	else
		return cycle_end;
}

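/* Convert between bytes and wire time: len * picos_per_byte / PSEC_PER_NSEC
 * gives the transmit duration in ns, and the inverse maps a duration back
 * to a byte count.
 */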
static int length_to_duration(struct taprio_sched *q, int len)
{
	return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC);
}

static int duration_to_length(struct taprio_sched *q, u64 duration)
{
	return div_u64(duration * PSEC_PER_NSEC, atomic64_read(&q->picos_per_byte));
}

/* Sets sched->max_sdu[] and sched->max_frm_len[] to the minimum between the
 * q->max_sdu[] requested by the user and the max_sdu dynamically determined by
 * the maximum open gate durations at the given link speed.
 */
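/* Illustrative arithmetic (not from the code): at 1 Gbps, picos_per_byte is
 * 8000, so a 10 us open gate yields max_frm_len = 10000 ns * 1000 / 8000 =
 * 1250 bytes, before the L1 overhead and hard_header_len adjustments below.
 */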
static void taprio_update_queue_max_sdu(struct taprio_sched *q,
					struct sched_gate_list *sched,
					struct qdisc_size_table *stab)
{
	struct net_device *dev = qdisc_dev(q->root);
	int num_tc = netdev_get_num_tc(dev);
	u32 max_sdu_from_user;
	u32 max_sdu_dynamic;
	u32 max_sdu;
	int tc;

	for (tc = 0; tc < num_tc; tc++) {
		max_sdu_from_user = q->max_sdu[tc] ?: U32_MAX;

		/* TC gate never closes => keep the queueMaxSDU
		 * selected by the user
		 */
		if (sched->max_open_gate_duration[tc] == sched->cycle_time) {
			max_sdu_dynamic = U32_MAX;
		} else {
			u32 max_frm_len;

			max_frm_len = duration_to_length(q, sched->max_open_gate_duration[tc]);
			/* Compensate for L1 overhead from size table,
			 * but don't let the frame size go negative
			 */
			if (stab) {
				max_frm_len -= stab->szopts.overhead;
				max_frm_len = max_t(int, max_frm_len,
						    dev->hard_header_len + 1);
			}
			max_sdu_dynamic = max_frm_len - dev->hard_header_len;
			if (max_sdu_dynamic > dev->max_mtu)
				max_sdu_dynamic = U32_MAX;
		}

		max_sdu = min(max_sdu_dynamic, max_sdu_from_user);

		if (max_sdu != U32_MAX) {
			sched->max_frm_len[tc] = max_sdu + dev->hard_header_len;
			sched->max_sdu[tc] = max_sdu;
		} else {
			sched->max_frm_len[tc] = U32_MAX; /* never oversized */
			sched->max_sdu[tc] = 0;
		}
	}
}

/* Returns the entry corresponding to the next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
						  struct Qdisc *sch,
						  struct sched_gate_list *sched,
						  struct sched_gate_list *admin,
						  ktime_t time,
						  ktime_t *interval_start,
						  ktime_t *interval_end,
						  bool validate_interval)
{
	ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
	ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
	struct sched_entry *entry = NULL, *entry_found = NULL;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	bool entry_available = false;
	s32 cycle_elapsed;
	int tc, n;

	tc = netdev_get_prio_tc_map(dev, skb->priority);
	packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

	*interval_start = 0;
	*interval_end = 0;

	if (!sched)
		return NULL;

	cycle = sched->cycle_time;
	cycle_elapsed = get_cycle_time_elapsed(sched, time);
	curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
	cycle_end = ktime_add_ns(curr_intv_end, cycle);

	list_for_each_entry(entry, &sched->entries, list) {
		curr_intv_start = curr_intv_end;
		curr_intv_end = get_interval_end_time(sched, admin, entry,
						      curr_intv_start);

		if (ktime_after(curr_intv_start, cycle_end))
			break;

		if (!(entry->gate_mask & BIT(tc)) ||
		    packet_transmit_time > entry->interval)
			continue;

		txtime = entry->next_txtime;

		if (ktime_before(txtime, time) || validate_interval) {
			transmit_end_time = ktime_add_ns(time, packet_transmit_time);
			if ((ktime_before(curr_intv_start, time) &&
			     ktime_before(transmit_end_time, curr_intv_end)) ||
			    (ktime_after(curr_intv_start, time) && !validate_interval)) {
				entry_found = entry;
				*interval_start = curr_intv_start;
				*interval_end = curr_intv_end;
				break;
			} else if (!entry_available && !validate_interval) {
				/* Here, we are just trying to find out the
				 * first available interval in the next cycle.
				 */
				entry_available = true;
				entry_found = entry;
				*interval_start = ktime_add_ns(curr_intv_start, cycle);
				*interval_end = ktime_add_ns(curr_intv_end, cycle);
			}
		} else if (ktime_before(txtime, earliest_txtime) &&
			   !entry_available) {
			earliest_txtime = txtime;
			entry_found = entry;
			n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
			*interval_start = ktime_add(curr_intv_start, n * cycle);
			*interval_end = ktime_add(curr_intv_end, n * cycle);
		}
	}

	return entry_found;
}

static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t interval_start, interval_end;
	struct sched_entry *entry;

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
				       &interval_start, &interval_end, true);
	rcu_read_unlock();

	return entry;
}

static bool taprio_flags_valid(u32 flags)
{
	/* Make sure no other flag bits are set. */
	if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
		      TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	/* txtime-assist and full offload are mutually exclusive */
	if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
	    (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	return true;
}

/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
	unsigned int offset = skb_network_offset(skb);
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	struct ipv6hdr _ipv6h;

	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
	if (!ipv6h)
		return 0;

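	/* The 4-bit version field sits at the same offset in IPv4 and IPv6
	 * headers, so it is safe to inspect it through the ipv6hdr pointer.
	 */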
	if (ipv6h->version == 4) {
		iph = (struct iphdr *)ipv6h;
		offset += iph->ihl * 4;

		/* special-case 6in4 tunnelling, as that is a common way to get
		 * v6 connectivity in the home
		 */
		if (iph->protocol == IPPROTO_IPV6) {
			ipv6h = skb_header_pointer(skb, offset,
						   sizeof(_ipv6h), &_ipv6h);

			if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
				return 0;
		} else if (iph->protocol != IPPROTO_TCP) {
			return 0;
		}
	} else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
		return 0;
	}

	return taprio_mono_to_any(q, skb->skb_mstamp_ns);
}

/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and the packet can
 *       be transmitted before it closes: schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open later
 *       in the cycle, set the txtime of the packet to the interval start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
	ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t minimum_time, now, txtime;
	int len, packet_transmit_time;
	struct sched_entry *entry;
	bool sched_changed;

	now = taprio_get_time(q);
	minimum_time = ktime_add_ns(now, q->txtime_delay);

	tcp_tstamp = get_tcp_tstamp(q, skb);
	minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

	rcu_read_lock();
	admin = rcu_dereference(q->admin_sched);
	sched = rcu_dereference(q->oper_sched);
	if (admin && ktime_after(minimum_time, admin->base_time))
		switch_schedules(q, &admin, &sched);

	/* Until the schedule starts, all the queues are open */
	if (!sched || ktime_before(minimum_time, sched->base_time)) {
		txtime = minimum_time;
		goto done;
	}

	len = qdisc_pkt_len(skb);
	packet_transmit_time = length_to_duration(q, len);

	do {
		sched_changed = false;

		entry = find_entry_to_transmit(skb, sch, sched, admin,
					       minimum_time,
					       &interval_start, &interval_end,
					       false);
		if (!entry) {
			txtime = 0;
			goto done;
		}

		txtime = entry->next_txtime;
		txtime = max_t(ktime_t, txtime, minimum_time);
		txtime = max_t(ktime_t, txtime, interval_start);

		if (admin && admin != sched &&
		    ktime_after(txtime, admin->base_time)) {
			sched = admin;
			sched_changed = true;
			continue;
		}

		transmit_end_time = ktime_add(txtime, packet_transmit_time);
		minimum_time = transmit_end_time;

		/* Update the txtime of the current entry to the next time its
		 * interval starts.
		 */
		if (ktime_after(transmit_end_time, interval_end))
			entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
	} while (sched_changed || ktime_after(transmit_end_time, interval_end));

	entry->next_txtime = transmit_end_time;

done:
	rcu_read_unlock();
	return txtime;
}

/* Devices with full offload are expected to honor this in hardware */
static bool taprio_skb_exceeds_queue_max_sdu(struct Qdisc *sch,
					     struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *sched;
	int prio = skb->priority;
	bool exceeds = false;
	u8 tc;

	tc = netdev_get_prio_tc_map(dev, prio);

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	if (sched && skb->len > sched->max_frm_len[tc])
		exceeds = true;
	rcu_read_unlock();

	return exceeds;
}

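/* Enqueue a single skb to the per-TXQ child qdisc. In txtime-assist mode the
 * skb either carries a user-provided txtime that must fall within a valid
 * interval, or is stamped here via get_packet_txtime().
 */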
static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
			      struct Qdisc *child, struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);

	/* sk_flags are only safe to use on full sockets. */
	if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
		if (!is_valid_interval(skb, sch))
			return qdisc_drop(skb, sch, to_free);
	} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		skb->tstamp = get_packet_txtime(skb, sch);
		if (!skb->tstamp)
			return qdisc_drop(skb, sch, to_free);
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return qdisc_enqueue(skb, child, to_free);
}

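/* Software-segment a GSO skb whose segments may individually fit the queue's
 * max SDU, enqueue each segment separately, and fix up the qlen/backlog
 * accounting for the extra packets created by segmentation.
 */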
static int taprio_enqueue_segmented(struct sk_buff *skb, struct Qdisc *sch,
				    struct Qdisc *child,
				    struct sk_buff **to_free)
{
	unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
	netdev_features_t features = netif_skb_features(skb);
	struct sk_buff *segs, *nskb;
	int ret;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs))
		return qdisc_drop(skb, sch, to_free);

	skb_list_walk_safe(segs, segs, nskb) {
		skb_mark_not_on_list(segs);
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		slen += segs->len;

		/* FIXME: we should be segmenting to a smaller size
		 * rather than dropping these
		 */
		if (taprio_skb_exceeds_queue_max_sdu(sch, segs))
			ret = qdisc_drop(segs, sch, to_free);
		else
			ret = taprio_enqueue_one(segs, sch, child, to_free);

		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			numsegs++;
		}
	}

	if (numsegs > 1)
		qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
	consume_skb(skb);

	return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct Qdisc *child;
	int queue;

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	if (taprio_skb_exceeds_queue_max_sdu(sch, skb)) {
		/* Large packets might not be transmitted when the transmission
		 * duration exceeds any configured interval. Therefore, segment
		 * the skb into smaller chunks. Drivers with full offload are
		 * expected to handle this in hardware.
		 */
		if (skb_is_gso(skb))
			return taprio_enqueue_segmented(skb, sch, child,
							to_free);

		return qdisc_drop(skb, sch, to_free);
	}

	return taprio_enqueue_one(skb, sch, child, to_free);
}

static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	WARN_ONCE(1, "taprio only supports operating as root qdisc, peek() not implemented");
	return NULL;
}

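/* Convert each TC's open-gate duration into a byte budget: the number of
 * bytes transmittable while the gate is open is
 * gate_duration[tc] * PSEC_PER_NSEC / picos_per_byte.
 */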
static void taprio_set_budgets(struct taprio_sched *q,
			       struct sched_gate_list *sched,
			       struct sched_entry *entry)
{
	struct net_device *dev = qdisc_dev(q->root);
	int num_tc = netdev_get_num_tc(dev);
	int tc, budget;

	for (tc = 0; tc < num_tc; tc++) {
		/* Traffic classes which never close have infinite budget */
		if (entry->gate_duration[tc] == sched->cycle_time)
			budget = INT_MAX;
		else
			budget = div64_u64((u64)entry->gate_duration[tc] * PSEC_PER_NSEC,
					   atomic64_read(&q->picos_per_byte));

		atomic_set(&entry->budget[tc], budget);
	}
}

/* When an skb is sent, it consumes from the budget of all traffic classes */
static int taprio_update_budgets(struct sched_entry *entry, size_t len,
				 int tc_consumed, int num_tc)
{
	int tc, budget, new_budget = 0;

	for (tc = 0; tc < num_tc; tc++) {
		budget = atomic_read(&entry->budget[tc]);
		/* Don't consume from infinite budget */
		if (budget == INT_MAX) {
			if (tc == tc_consumed)
				new_budget = budget;
			continue;
		}

		if (tc == tc_consumed)
			new_budget = atomic_sub_return(len, &entry->budget[tc]);
		else
			atomic_sub(len, &entry->budget[tc]);
	}

	return new_budget;
}

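/* Dequeue from one TXQ's child qdisc, but only if the peeked skb's gate is
 * open and the skb can both finish transmission before the gate closes
 * (guard band) and fit within the remaining byte budget.
 */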
static struct sk_buff *taprio_dequeue_from_txq(struct Qdisc *sch, int txq,
					       struct sched_entry *entry,
					       u32 gate_mask)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *child = q->qdiscs[txq];
	int num_tc = netdev_get_num_tc(dev);
	struct sk_buff *skb;
	ktime_t guard;
	int prio;
	int len;
	u8 tc;

	if (unlikely(!child))
		return NULL;

	if (TXTIME_ASSIST_IS_ENABLED(q->flags))
		goto skip_peek_checks;

	skb = child->ops->peek(child);
	if (!skb)
		return NULL;

	prio = skb->priority;
	tc = netdev_get_prio_tc_map(dev, prio);

	if (!(gate_mask & BIT(tc)))
		return NULL;

	len = qdisc_pkt_len(skb);
	guard = ktime_add_ns(taprio_get_time(q), length_to_duration(q, len));

	/* In the case that there's no gate entry, there's no
	 * guard band ...
	 */
	if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
	    !taprio_entry_allows_tx(guard, entry, tc))
		return NULL;

	/* ... and no budget. */
	if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
	    taprio_update_budgets(entry, len, tc, num_tc) < 0)
		return NULL;

skip_peek_checks:
	skb = child->ops->dequeue(child);
	if (unlikely(!skb))
		return NULL;

	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;

	return skb;
}

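/* Advance *txq to the next queue in the range [offset, offset + count) that
 * mqprio maps to this traffic class, wrapping around at the end.
 */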
static void taprio_next_tc_txq(struct net_device *dev, int tc, int *txq)
{
	int offset = dev->tc_to_txq[tc].offset;
	int count = dev->tc_to_txq[tc].count;

	(*txq)++;
	if (*txq == offset + count)
		*txq = offset;
}

/* Prioritize higher traffic classes, and select among TXQs belonging to the
 * same TC using round robin
 */
static struct sk_buff *taprio_dequeue_tc_priority(struct Qdisc *sch,
						  struct sched_entry *entry,
						  u32 gate_mask)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int num_tc = netdev_get_num_tc(dev);
	struct sk_buff *skb;
	int tc;

	for (tc = num_tc - 1; tc >= 0; tc--) {
		int first_txq = q->cur_txq[tc];

		if (!(gate_mask & BIT(tc)))
			continue;

		do {
			skb = taprio_dequeue_from_txq(sch, q->cur_txq[tc],
						      entry, gate_mask);

			taprio_next_tc_txq(dev, tc, &q->cur_txq[tc]);

			if (skb)
				return skb;
		} while (q->cur_txq[tc] != first_txq);
	}

	return NULL;
}

/* Broken way of prioritizing smaller TXQ indices and ignoring the traffic
 * class other than to determine whether the gate is open or not
 */
static struct sk_buff *taprio_dequeue_txq_priority(struct Qdisc *sch,
						   struct sched_entry *entry,
						   u32 gate_mask)
{
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		skb = taprio_dequeue_from_txq(sch, i, entry, gate_mask);
		if (skb)
			return skb;
	}

	return NULL;
}

/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	struct sched_entry *entry;
	u32 gate_mask;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	/* if there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open; this is in
	 * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 * "AdminGateStates"
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	if (!gate_mask)
		goto done;

	if (static_branch_unlikely(&taprio_have_broken_mqprio) &&
	    !static_branch_likely(&taprio_have_working_mqprio)) {
		/* Single NIC kind which is broken */
		skb = taprio_dequeue_txq_priority(sch, entry, gate_mask);
	} else if (static_branch_likely(&taprio_have_working_mqprio) &&
		   !static_branch_unlikely(&taprio_have_broken_mqprio)) {
		/* Single NIC kind which prioritizes properly */
		skb = taprio_dequeue_tc_priority(sch, entry, gate_mask);
	} else {
		/* Mixed NIC kinds present in system, need dynamic testing */
		if (q->broken_mqprio)
			skb = taprio_dequeue_txq_priority(sch, entry, gate_mask);
		else
			skb = taprio_dequeue_tc_priority(sch, entry, gate_mask);
	}

done:
	rcu_read_unlock();

	return skb;
}

static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{
	if (list_is_last(&entry->list, &oper->entries))
		return true;

	if (ktime_compare(entry->end_time, oper->cycle_end_time) == 0)
		return true;

	return false;
}

static bool should_change_schedules(const struct sched_gate_list *admin,
				    const struct sched_gate_list *oper,
				    ktime_t end_time)
{
	ktime_t next_base_time, extension_time;

	if (!admin)
		return false;

	next_base_time = sched_base_time(admin);

	/* This is the simple case: the end_time falls after
	 * the next schedule's base_time.
	 */
	if (ktime_compare(next_base_time, end_time) <= 0)
		return true;

	/* This is the cycle_time_extension case: if the end_time
	 * plus the amount that can be extended would fall after the
	 * next schedule's base_time, we can extend the current schedule
	 * for that amount.
	 */
	extension_time = ktime_add_ns(end_time, oper->cycle_time_extension);

	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
	 * how precisely the extension should be made. So after
	 * conformance testing, this logic may change.
	 */
	if (ktime_compare(next_base_time, extension_time) <= 0)
		return true;

	return false;
}

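/* hrtimer callback that steps through the GCL: it picks the next entry,
 * computes its end_time and per-TC gate close times, refills the byte
 * budgets, swaps in the admin schedule when it becomes active, and rearms
 * itself for the new end_time.
 */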
static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct net_device *dev = qdisc_dev(q->root);
	struct sched_gate_list *oper, *admin;
	int num_tc = netdev_get_num_tc(dev);
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;
	ktime_t end_time;
	int tc;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));
	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	if (!oper)
		switch_schedules(q, &admin, &oper);

	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. The previous schedule just ended. The first
	 * entry of all schedules is pre-calculated during the
	 * schedule initialization.
	 */
949e5517551SVladimir Oltean 	if (unlikely(!entry || entry->end_time == oper->base_time)) {
950a3d43c0dSVinicius Costa Gomes 		next = list_first_entry(&oper->entries, struct sched_entry,
9515a781ccbSVinicius Costa Gomes 					list);
952e5517551SVladimir Oltean 		end_time = next->end_time;
9535a781ccbSVinicius Costa Gomes 		goto first_run;
9545a781ccbSVinicius Costa Gomes 	}
9555a781ccbSVinicius Costa Gomes 
9566ca6a665SVinicius Costa Gomes 	if (should_restart_cycle(oper, entry)) {
957a3d43c0dSVinicius Costa Gomes 		next = list_first_entry(&oper->entries, struct sched_entry,
9585a781ccbSVinicius Costa Gomes 					list);
959e5517551SVladimir Oltean 		oper->cycle_end_time = ktime_add_ns(oper->cycle_end_time,
9606ca6a665SVinicius Costa Gomes 						    oper->cycle_time);
9616ca6a665SVinicius Costa Gomes 	} else {
9625a781ccbSVinicius Costa Gomes 		next = list_next_entry(entry, list);
9636ca6a665SVinicius Costa Gomes 	}
9645a781ccbSVinicius Costa Gomes 
965e5517551SVladimir Oltean 	end_time = ktime_add_ns(entry->end_time, next->interval);
966e5517551SVladimir Oltean 	end_time = min_t(ktime_t, end_time, oper->cycle_end_time);
9675a781ccbSVinicius Costa Gomes 
968a1e6ad30SVladimir Oltean 	for (tc = 0; tc < num_tc; tc++) {
969a1e6ad30SVladimir Oltean 		if (next->gate_duration[tc] == oper->cycle_time)
970a1e6ad30SVladimir Oltean 			next->gate_close_time[tc] = KTIME_MAX;
971a1e6ad30SVladimir Oltean 		else
972a1e6ad30SVladimir Oltean 			next->gate_close_time[tc] = ktime_add_ns(entry->end_time,
973a1e6ad30SVladimir Oltean 								 next->gate_duration[tc]);
974a1e6ad30SVladimir Oltean 	}
975a1e6ad30SVladimir Oltean 
976e5517551SVladimir Oltean 	if (should_change_schedules(admin, oper, end_time)) {
977a3d43c0dSVinicius Costa Gomes 		/* Set things so the next time this runs, the new
978a3d43c0dSVinicius Costa Gomes 		 * schedule runs.
979a3d43c0dSVinicius Costa Gomes 		 */
980e5517551SVladimir Oltean 		end_time = sched_base_time(admin);
981a3d43c0dSVinicius Costa Gomes 		switch_schedules(q, &admin, &oper);
982a3d43c0dSVinicius Costa Gomes 	}
983a3d43c0dSVinicius Costa Gomes 
984e5517551SVladimir Oltean 	next->end_time = end_time;
985d2ad689dSVladimir Oltean 	taprio_set_budgets(q, oper, next);
9865a781ccbSVinicius Costa Gomes 
9875a781ccbSVinicius Costa Gomes first_run:
9885a781ccbSVinicius Costa Gomes 	rcu_assign_pointer(q->current_entry, next);
9895a781ccbSVinicius Costa Gomes 	spin_unlock(&q->current_entry_lock);
9905a781ccbSVinicius Costa Gomes 
991e5517551SVladimir Oltean 	hrtimer_set_expires(&q->advance_timer, end_time);
9925a781ccbSVinicius Costa Gomes 
9935a781ccbSVinicius Costa Gomes 	rcu_read_lock();
9945a781ccbSVinicius Costa Gomes 	__netif_schedule(sch);
9955a781ccbSVinicius Costa Gomes 	rcu_read_unlock();
9965a781ccbSVinicius Costa Gomes 
9975a781ccbSVinicius Costa Gomes 	return HRTIMER_RESTART;
9985a781ccbSVinicius Costa Gomes }
9995a781ccbSVinicius Costa Gomes 
10005a781ccbSVinicius Costa Gomes static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
10015a781ccbSVinicius Costa Gomes 	[TCA_TAPRIO_SCHED_ENTRY_INDEX]	   = { .type = NLA_U32 },
10025a781ccbSVinicius Costa Gomes 	[TCA_TAPRIO_SCHED_ENTRY_CMD]	   = { .type = NLA_U8 },
10035a781ccbSVinicius Costa Gomes 	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
10045a781ccbSVinicius Costa Gomes 	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
10055a781ccbSVinicius Costa Gomes };
10065a781ccbSVinicius Costa Gomes 
1007a54fc09eSVladimir Oltean static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
1008a54fc09eSVladimir Oltean 	[TCA_TAPRIO_TC_ENTRY_INDEX]	   = { .type = NLA_U32 },
1009a54fc09eSVladimir Oltean 	[TCA_TAPRIO_TC_ENTRY_MAX_SDU]	   = { .type = NLA_U32 },
1010a721c3e5SVladimir Oltean 	[TCA_TAPRIO_TC_ENTRY_FP]	   = NLA_POLICY_RANGE(NLA_U32,
1011a721c3e5SVladimir Oltean 							      TC_FP_EXPRESS,
1012a721c3e5SVladimir Oltean 							      TC_FP_PREEMPTIBLE),
1013a54fc09eSVladimir Oltean };
1014a54fc09eSVladimir Oltean 
10155a781ccbSVinicius Costa Gomes static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
10165a781ccbSVinicius Costa Gomes 	[TCA_TAPRIO_ATTR_PRIOMAP]	       = {
10175a781ccbSVinicius Costa Gomes 		.len = sizeof(struct tc_mqprio_qopt)
10185a781ccbSVinicius Costa Gomes 	},
10195a781ccbSVinicius Costa Gomes 	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]           = { .type = NLA_NESTED },
10205a781ccbSVinicius Costa Gomes 	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]            = { .type = NLA_S64 },
10215a781ccbSVinicius Costa Gomes 	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]         = { .type = NLA_NESTED },
10225a781ccbSVinicius Costa Gomes 	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
10236ca6a665SVinicius Costa Gomes 	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
1024c25031e9SVinicius Costa Gomes 	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
102549c684d7SVinicius Costa Gomes 	[TCA_TAPRIO_ATTR_FLAGS]                      = { .type = NLA_U32 },
1026e13aaa06SJakub Kicinski 	[TCA_TAPRIO_ATTR_TXTIME_DELAY]		     = { .type = NLA_U32 },
1027a54fc09eSVladimir Oltean 	[TCA_TAPRIO_ATTR_TC_ENTRY]		     = { .type = NLA_NESTED },
10285a781ccbSVinicius Costa Gomes };
10295a781ccbSVinicius Costa Gomes 
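/* For context, these attributes are what a typical iproute2 invocation
 * produces. An illustrative example (device name and times are
 * hypothetical):
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *       num_tc 3 \
 *       map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *       queues 1@0 1@1 2@2 \
 *       base-time 1528743495910289987 \
 *       sched-entry S 01 300000 \
 *       sched-entry S 02 300000 \
 *       sched-entry S 04 400000 \
 *       clockid CLOCK_TAI
 *
 * "map" and "queues" land in TCA_TAPRIO_ATTR_PRIOMAP, each "sched-entry"
 * becomes one TCA_TAPRIO_SCHED_ENTRY nest within
 * TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, and "base-time"/"clockid" map to the
 * corresponding attributes above.
 */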
1030b5b73b26SVinicius Costa Gomes static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
1031b5b73b26SVinicius Costa Gomes 			    struct sched_entry *entry,
10325a781ccbSVinicius Costa Gomes 			    struct netlink_ext_ack *extack)
10335a781ccbSVinicius Costa Gomes {
1034b5b73b26SVinicius Costa Gomes 	int min_duration = length_to_duration(q, ETH_ZLEN);
10355a781ccbSVinicius Costa Gomes 	u32 interval = 0;
10365a781ccbSVinicius Costa Gomes 
10375a781ccbSVinicius Costa Gomes 	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
10385a781ccbSVinicius Costa Gomes 		entry->command = nla_get_u8(
10395a781ccbSVinicius Costa Gomes 			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);
10405a781ccbSVinicius Costa Gomes 
10415a781ccbSVinicius Costa Gomes 	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
10425a781ccbSVinicius Costa Gomes 		entry->gate_mask = nla_get_u32(
10435a781ccbSVinicius Costa Gomes 			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);
10445a781ccbSVinicius Costa Gomes 
10455a781ccbSVinicius Costa Gomes 	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
10465a781ccbSVinicius Costa Gomes 		interval = nla_get_u32(
10475a781ccbSVinicius Costa Gomes 			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);
10485a781ccbSVinicius Costa Gomes 
1049b5b73b26SVinicius Costa Gomes 	/* The interval should be long enough to let at least one
1050b5b73b26SVinicius Costa Gomes 	 * minimum-sized Ethernet frame go out.
1051b5b73b26SVinicius Costa Gomes 	 */
1052b5b73b26SVinicius Costa Gomes 	if (interval < min_duration) {
10535a781ccbSVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
10545a781ccbSVinicius Costa Gomes 		return -EINVAL;
10555a781ccbSVinicius Costa Gomes 	}
10565a781ccbSVinicius Costa Gomes 
10575a781ccbSVinicius Costa Gomes 	entry->interval = interval;
10585a781ccbSVinicius Costa Gomes 
10595a781ccbSVinicius Costa Gomes 	return 0;
10605a781ccbSVinicius Costa Gomes }
10615a781ccbSVinicius Costa Gomes 
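/* Worked example for the interval check above, with assumed numbers:
 * ETH_ZLEN is 60 bytes, so at 1 Gb/s (8000 ps per byte, see
 * taprio_set_picos_per_byte() below) length_to_duration() yields a
 * min_duration of 60 * 8000 ps = 480 ns. A sched-entry interval below
 * that could never transmit even a minimum-sized frame and is rejected.
 */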
1062b5b73b26SVinicius Costa Gomes static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
1063b5b73b26SVinicius Costa Gomes 			     struct sched_entry *entry, int index,
1064b5b73b26SVinicius Costa Gomes 			     struct netlink_ext_ack *extack)
10655a781ccbSVinicius Costa Gomes {
10665a781ccbSVinicius Costa Gomes 	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
10675a781ccbSVinicius Costa Gomes 	int err;
10685a781ccbSVinicius Costa Gomes 
10698cb08174SJohannes Berg 	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
10705a781ccbSVinicius Costa Gomes 					  entry_policy, NULL);
10715a781ccbSVinicius Costa Gomes 	if (err < 0) {
10725a781ccbSVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
10735a781ccbSVinicius Costa Gomes 		return -EINVAL;
10745a781ccbSVinicius Costa Gomes 	}
10755a781ccbSVinicius Costa Gomes 
10765a781ccbSVinicius Costa Gomes 	entry->index = index;
10775a781ccbSVinicius Costa Gomes 
1078b5b73b26SVinicius Costa Gomes 	return fill_sched_entry(q, tb, entry, extack);
10795a781ccbSVinicius Costa Gomes }
10805a781ccbSVinicius Costa Gomes 
1081b5b73b26SVinicius Costa Gomes static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
1082a3d43c0dSVinicius Costa Gomes 			    struct sched_gate_list *sched,
10835a781ccbSVinicius Costa Gomes 			    struct netlink_ext_ack *extack)
10845a781ccbSVinicius Costa Gomes {
10855a781ccbSVinicius Costa Gomes 	struct nlattr *n;
10865a781ccbSVinicius Costa Gomes 	int err, rem;
10875a781ccbSVinicius Costa Gomes 	int i = 0;
10885a781ccbSVinicius Costa Gomes 
10895a781ccbSVinicius Costa Gomes 	if (!list)
10905a781ccbSVinicius Costa Gomes 		return -EINVAL;
10915a781ccbSVinicius Costa Gomes 
10925a781ccbSVinicius Costa Gomes 	nla_for_each_nested(n, list, rem) {
10935a781ccbSVinicius Costa Gomes 		struct sched_entry *entry;
10945a781ccbSVinicius Costa Gomes 
10955a781ccbSVinicius Costa Gomes 		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
10965a781ccbSVinicius Costa Gomes 			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
10975a781ccbSVinicius Costa Gomes 			continue;
10985a781ccbSVinicius Costa Gomes 		}
10995a781ccbSVinicius Costa Gomes 
11005a781ccbSVinicius Costa Gomes 		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
11015a781ccbSVinicius Costa Gomes 		if (!entry) {
11025a781ccbSVinicius Costa Gomes 			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
11035a781ccbSVinicius Costa Gomes 			return -ENOMEM;
11045a781ccbSVinicius Costa Gomes 		}
11055a781ccbSVinicius Costa Gomes 
1106b5b73b26SVinicius Costa Gomes 		err = parse_sched_entry(q, n, entry, i, extack);
11075a781ccbSVinicius Costa Gomes 		if (err < 0) {
11085a781ccbSVinicius Costa Gomes 			kfree(entry);
11095a781ccbSVinicius Costa Gomes 			return err;
11105a781ccbSVinicius Costa Gomes 		}
11115a781ccbSVinicius Costa Gomes 
1112a3d43c0dSVinicius Costa Gomes 		list_add_tail(&entry->list, &sched->entries);
11135a781ccbSVinicius Costa Gomes 		i++;
11145a781ccbSVinicius Costa Gomes 	}
11155a781ccbSVinicius Costa Gomes 
1116a3d43c0dSVinicius Costa Gomes 	sched->num_entries = i;
11175a781ccbSVinicius Costa Gomes 
11185a781ccbSVinicius Costa Gomes 	return i;
11195a781ccbSVinicius Costa Gomes }
11205a781ccbSVinicius Costa Gomes 
1121b5b73b26SVinicius Costa Gomes static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
1122a3d43c0dSVinicius Costa Gomes 				 struct sched_gate_list *new,
11235a781ccbSVinicius Costa Gomes 				 struct netlink_ext_ack *extack)
11245a781ccbSVinicius Costa Gomes {
11255a781ccbSVinicius Costa Gomes 	int err = 0;
11265a781ccbSVinicius Costa Gomes 
1127a3d43c0dSVinicius Costa Gomes 	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
1128a3d43c0dSVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
1129a3d43c0dSVinicius Costa Gomes 		return -ENOTSUPP;
1130a3d43c0dSVinicius Costa Gomes 	}
11315a781ccbSVinicius Costa Gomes 
11325a781ccbSVinicius Costa Gomes 	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
1133a3d43c0dSVinicius Costa Gomes 		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);
11345a781ccbSVinicius Costa Gomes 
1135c25031e9SVinicius Costa Gomes 	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
1136c25031e9SVinicius Costa Gomes 		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);
1137c25031e9SVinicius Costa Gomes 
11386ca6a665SVinicius Costa Gomes 	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
11396ca6a665SVinicius Costa Gomes 		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);
11406ca6a665SVinicius Costa Gomes 
11415a781ccbSVinicius Costa Gomes 	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
1142b5b73b26SVinicius Costa Gomes 		err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
1143b5b73b26SVinicius Costa Gomes 				       new, extack);
1144a3d43c0dSVinicius Costa Gomes 	if (err < 0)
11455a781ccbSVinicius Costa Gomes 		return err;
1146a3d43c0dSVinicius Costa Gomes 
1147037be037SVedang Patel 	if (!new->cycle_time) {
1148037be037SVedang Patel 		struct sched_entry *entry;
1149037be037SVedang Patel 		ktime_t cycle = 0;
1150037be037SVedang Patel 
1151037be037SVedang Patel 		list_for_each_entry(entry, &new->entries, list)
1152037be037SVedang Patel 			cycle = ktime_add_ns(cycle, entry->interval);
1153ed8157f1SDu Cheng 
1154ed8157f1SDu Cheng 		if (!cycle) {
1155ed8157f1SDu Cheng 			NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
1156ed8157f1SDu Cheng 			return -EINVAL;
1157ed8157f1SDu Cheng 		}
1158ed8157f1SDu Cheng 
1159037be037SVedang Patel 		new->cycle_time = cycle;
1160037be037SVedang Patel 	}
1161037be037SVedang Patel 
1162a306a90cSVladimir Oltean 	taprio_calculate_gate_durations(q, new);
1163a306a90cSVladimir Oltean 
1164a3d43c0dSVinicius Costa Gomes 	return 0;
11655a781ccbSVinicius Costa Gomes }
11665a781ccbSVinicius Costa Gomes 
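/* Example of the cycle_time fallback above, for a hypothetical schedule:
 * three entries of 300 us, 300 us and 400 us with no explicit
 * TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME default to a cycle time of their sum,
 * 1 ms. A sum of zero is impossible for a valid schedule and is rejected.
 */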
11675a781ccbSVinicius Costa Gomes static int taprio_parse_mqprio_opt(struct net_device *dev,
11685a781ccbSVinicius Costa Gomes 				   struct tc_mqprio_qopt *qopt,
11694cfd5779SVedang Patel 				   struct netlink_ext_ack *extack,
11704cfd5779SVedang Patel 				   u32 taprio_flags)
11715a781ccbSVinicius Costa Gomes {
11721dfe086dSVladimir Oltean 	bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags);
11735a781ccbSVinicius Costa Gomes 
1174a3d43c0dSVinicius Costa Gomes 	if (!qopt && !dev->num_tc) {
11755a781ccbSVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
11765a781ccbSVinicius Costa Gomes 		return -EINVAL;
11775a781ccbSVinicius Costa Gomes 	}
11785a781ccbSVinicius Costa Gomes 
1179a3d43c0dSVinicius Costa Gomes 	/* If num_tc is already set, it means that the user already
1180a3d43c0dSVinicius Costa Gomes 	 * configured the mqprio part
1181a3d43c0dSVinicius Costa Gomes 	 */
1182a3d43c0dSVinicius Costa Gomes 	if (dev->num_tc)
1183a3d43c0dSVinicius Costa Gomes 		return 0;
1184a3d43c0dSVinicius Costa Gomes 
11855a781ccbSVinicius Costa Gomes 	/* taprio imposes that traffic classes map 1:n to tx queues */
11865a781ccbSVinicius Costa Gomes 	if (qopt->num_tc > dev->num_tx_queues) {
11875a781ccbSVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
11885a781ccbSVinicius Costa Gomes 		return -EINVAL;
11895a781ccbSVinicius Costa Gomes 	}
11905a781ccbSVinicius Costa Gomes 
11911dfe086dSVladimir Oltean 	/* In txtime-assist mode we allow TXQ ranges for different TCs
11921dfe086dSVladimir Oltean 	 * to overlap, and only validate the TXQ ranges themselves.
11935a781ccbSVinicius Costa Gomes 	 */
11941dfe086dSVladimir Oltean 	return mqprio_validate_qopt(dev, qopt, true, allow_overlapping_txqs,
11951dfe086dSVladimir Oltean 				    extack);
11965a781ccbSVinicius Costa Gomes }
11975a781ccbSVinicius Costa Gomes 
1198a3d43c0dSVinicius Costa Gomes static int taprio_get_start_time(struct Qdisc *sch,
1199a3d43c0dSVinicius Costa Gomes 				 struct sched_gate_list *sched,
1200a3d43c0dSVinicius Costa Gomes 				 ktime_t *start)
12015a781ccbSVinicius Costa Gomes {
12025a781ccbSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
12035a781ccbSVinicius Costa Gomes 	ktime_t now, base, cycle;
12045a781ccbSVinicius Costa Gomes 	s64 n;
12055a781ccbSVinicius Costa Gomes 
1206a3d43c0dSVinicius Costa Gomes 	base = sched_base_time(sched);
12077ede7b03SVedang Patel 	now = taprio_get_time(q);
12088599099fSAndre Guedes 
12098599099fSAndre Guedes 	if (ktime_after(base, now)) {
12108599099fSAndre Guedes 		*start = base;
12118599099fSAndre Guedes 		return 0;
12128599099fSAndre Guedes 	}
12135a781ccbSVinicius Costa Gomes 
1214037be037SVedang Patel 	cycle = sched->cycle_time;
12155a781ccbSVinicius Costa Gomes 
12168599099fSAndre Guedes 	/* The qdisc is expected to have at least one sched_entry.  Moreover,
12178599099fSAndre Guedes 	 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
12188599099fSAndre Guedes 	 * something went really wrong. In that case, we should warn about this
12198599099fSAndre Guedes 	 * inconsistent state and return error.
12208599099fSAndre Guedes 	 * inconsistent state and return an error.
12218599099fSAndre Guedes 	if (WARN_ON(!cycle))
12228599099fSAndre Guedes 		return -EFAULT;
12235a781ccbSVinicius Costa Gomes 
12245a781ccbSVinicius Costa Gomes 	/* Schedule the start time for the beginning of the next
12255a781ccbSVinicius Costa Gomes 	 * cycle.
12265a781ccbSVinicius Costa Gomes 	 */
12275a781ccbSVinicius Costa Gomes 	n = div64_s64(ktime_sub_ns(now, base), cycle);
12288599099fSAndre Guedes 	*start = ktime_add_ns(base, (n + 1) * cycle);
12298599099fSAndre Guedes 	return 0;
12305a781ccbSVinicius Costa Gomes }
12315a781ccbSVinicius Costa Gomes 
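/* Worked example of the rounding above, with hypothetical numbers:
 * for base = 0, cycle = 1 ms and now = 2.5 ms,
 * n = div64_s64(2.5 ms, 1 ms) = 2, so *start = base + (n + 1) * cycle
 * = 3 ms: the first cycle boundary that lies in the future.
 */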
1232e5517551SVladimir Oltean static void setup_first_end_time(struct taprio_sched *q,
1233a3d43c0dSVinicius Costa Gomes 				 struct sched_gate_list *sched, ktime_t base)
12345a781ccbSVinicius Costa Gomes {
1235a1e6ad30SVladimir Oltean 	struct net_device *dev = qdisc_dev(q->root);
1236a1e6ad30SVladimir Oltean 	int num_tc = netdev_get_num_tc(dev);
12375a781ccbSVinicius Costa Gomes 	struct sched_entry *first;
12386ca6a665SVinicius Costa Gomes 	ktime_t cycle;
1239a1e6ad30SVladimir Oltean 	int tc;
12405a781ccbSVinicius Costa Gomes 
1241a3d43c0dSVinicius Costa Gomes 	first = list_first_entry(&sched->entries,
1242a3d43c0dSVinicius Costa Gomes 				 struct sched_entry, list);
12435a781ccbSVinicius Costa Gomes 
1244037be037SVedang Patel 	cycle = sched->cycle_time;
12456ca6a665SVinicius Costa Gomes 
12466ca6a665SVinicius Costa Gomes 	/* FIXME: find a better place to do this */
1247e5517551SVladimir Oltean 	sched->cycle_end_time = ktime_add_ns(base, cycle);
12486ca6a665SVinicius Costa Gomes 
1249e5517551SVladimir Oltean 	first->end_time = ktime_add_ns(base, first->interval);
1250d2ad689dSVladimir Oltean 	taprio_set_budgets(q, sched, first);
1251a1e6ad30SVladimir Oltean 
1252a1e6ad30SVladimir Oltean 	for (tc = 0; tc < num_tc; tc++) {
1253a1e6ad30SVladimir Oltean 		if (first->gate_duration[tc] == sched->cycle_time)
1254a1e6ad30SVladimir Oltean 			first->gate_close_time[tc] = KTIME_MAX;
1255a1e6ad30SVladimir Oltean 		else
1256a1e6ad30SVladimir Oltean 			first->gate_close_time[tc] = ktime_add_ns(base, first->gate_duration[tc]);
1257a1e6ad30SVladimir Oltean 	}
1258a1e6ad30SVladimir Oltean 
12595a781ccbSVinicius Costa Gomes 	rcu_assign_pointer(q->current_entry, NULL);
1260a3d43c0dSVinicius Costa Gomes }
12615a781ccbSVinicius Costa Gomes 
1262a3d43c0dSVinicius Costa Gomes static void taprio_start_sched(struct Qdisc *sch,
1263a3d43c0dSVinicius Costa Gomes 			       ktime_t start, struct sched_gate_list *new)
1264a3d43c0dSVinicius Costa Gomes {
1265a3d43c0dSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
1266a3d43c0dSVinicius Costa Gomes 	ktime_t expires;
1267a3d43c0dSVinicius Costa Gomes 
12689c66d156SVinicius Costa Gomes 	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
12699c66d156SVinicius Costa Gomes 		return;
12709c66d156SVinicius Costa Gomes 
1271a3d43c0dSVinicius Costa Gomes 	expires = hrtimer_get_expires(&q->advance_timer);
1272a3d43c0dSVinicius Costa Gomes 	if (expires == 0)
1273a3d43c0dSVinicius Costa Gomes 		expires = KTIME_MAX;
1274a3d43c0dSVinicius Costa Gomes 
1275a3d43c0dSVinicius Costa Gomes 	/* If the new schedule starts before the next expiration, we
1276a3d43c0dSVinicius Costa Gomes 	 * reprogram it to the earliest one, so we change the admin
1277a3d43c0dSVinicius Costa Gomes 	 * schedule to the operational one at the right time.
1278a3d43c0dSVinicius Costa Gomes 	 */
1279a3d43c0dSVinicius Costa Gomes 	start = min_t(ktime_t, start, expires);
12805a781ccbSVinicius Costa Gomes 
12815a781ccbSVinicius Costa Gomes 	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
12825a781ccbSVinicius Costa Gomes }
12835a781ccbSVinicius Costa Gomes 
12847b9eba7bSLeandro Dorileo static void taprio_set_picos_per_byte(struct net_device *dev,
12857b9eba7bSLeandro Dorileo 				      struct taprio_sched *q)
12867b9eba7bSLeandro Dorileo {
12877b9eba7bSLeandro Dorileo 	struct ethtool_link_ksettings ecmd;
1288f04b514cSVladimir Oltean 	int speed = SPEED_10;
1289f04b514cSVladimir Oltean 	int picos_per_byte;
1290f04b514cSVladimir Oltean 	int err;
12917b9eba7bSLeandro Dorileo 
1292f04b514cSVladimir Oltean 	err = __ethtool_get_link_ksettings(dev, &ecmd);
1293f04b514cSVladimir Oltean 	if (err < 0)
1294f04b514cSVladimir Oltean 		goto skip;
1295f04b514cSVladimir Oltean 
12969a9251a3SVladimir Oltean 	if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
1297f04b514cSVladimir Oltean 		speed = ecmd.base.speed;
1298f04b514cSVladimir Oltean 
1299f04b514cSVladimir Oltean skip:
130068ce6688SVladimir Oltean 	picos_per_byte = (USEC_PER_SEC * 8) / speed;
13017b9eba7bSLeandro Dorileo 
13027b9eba7bSLeandro Dorileo 	atomic64_set(&q->picos_per_byte, picos_per_byte);
13037b9eba7bSLeandro Dorileo 	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
13047b9eba7bSLeandro Dorileo 		   dev->name, (long long)atomic64_read(&q->picos_per_byte),
13057b9eba7bSLeandro Dorileo 		   ecmd.base.speed);
13067b9eba7bSLeandro Dorileo }
13077b9eba7bSLeandro Dorileo 
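/* Worked example for the conversion above: USEC_PER_SEC * 8 is the
 * number of picoseconds one byte occupies at 1 Mb/s, so at SPEED_1000
 * picos_per_byte = 8000000 / 1000 = 8000 ps/byte, and with the SPEED_10
 * fallback (link speed unknown) it is 800000 ps/byte.
 */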
13087b9eba7bSLeandro Dorileo static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
13097b9eba7bSLeandro Dorileo 			       void *ptr)
13107b9eba7bSLeandro Dorileo {
13117b9eba7bSLeandro Dorileo 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1312fed87cc6SVladimir Oltean 	struct sched_gate_list *oper, *admin;
1313fed87cc6SVladimir Oltean 	struct qdisc_size_table *stab;
13147b9eba7bSLeandro Dorileo 	struct taprio_sched *q;
13157b9eba7bSLeandro Dorileo 
13167b9eba7bSLeandro Dorileo 	ASSERT_RTNL();
13177b9eba7bSLeandro Dorileo 
13187b9eba7bSLeandro Dorileo 	if (event != NETDEV_UP && event != NETDEV_CHANGE)
13197b9eba7bSLeandro Dorileo 		return NOTIFY_DONE;
13207b9eba7bSLeandro Dorileo 
13217b9eba7bSLeandro Dorileo 	list_for_each_entry(q, &taprio_list, taprio_list) {
1322fc4f2fd0SVladimir Oltean 		if (dev != qdisc_dev(q->root))
1323fc4f2fd0SVladimir Oltean 			continue;
1324fc4f2fd0SVladimir Oltean 
1325fc4f2fd0SVladimir Oltean 		taprio_set_picos_per_byte(dev, q);
1326fed87cc6SVladimir Oltean 
1327fed87cc6SVladimir Oltean 		stab = rtnl_dereference(q->root->stab);
1328fed87cc6SVladimir Oltean 
1329fed87cc6SVladimir Oltean 		oper = rtnl_dereference(q->oper_sched);
1330fed87cc6SVladimir Oltean 		if (oper)
1331fed87cc6SVladimir Oltean 			taprio_update_queue_max_sdu(q, oper, stab);
1332fed87cc6SVladimir Oltean 
1333fed87cc6SVladimir Oltean 		admin = rtnl_dereference(q->admin_sched);
1334fed87cc6SVladimir Oltean 		if (admin)
1335fed87cc6SVladimir Oltean 			taprio_update_queue_max_sdu(q, admin, stab);
1336fed87cc6SVladimir Oltean 
13377b9eba7bSLeandro Dorileo 		break;
13387b9eba7bSLeandro Dorileo 	}
13397b9eba7bSLeandro Dorileo 
13407b9eba7bSLeandro Dorileo 	return NOTIFY_DONE;
13417b9eba7bSLeandro Dorileo }
13427b9eba7bSLeandro Dorileo 
13434cfd5779SVedang Patel static void setup_txtime(struct taprio_sched *q,
13444cfd5779SVedang Patel 			 struct sched_gate_list *sched, ktime_t base)
13454cfd5779SVedang Patel {
13464cfd5779SVedang Patel 	struct sched_entry *entry;
13474cfd5779SVedang Patel 	u32 interval = 0;
13484cfd5779SVedang Patel 
13494cfd5779SVedang Patel 	list_for_each_entry(entry, &sched->entries, list) {
13504cfd5779SVedang Patel 		entry->next_txtime = ktime_add_ns(base, interval);
13514cfd5779SVedang Patel 		interval += entry->interval;
13524cfd5779SVedang Patel 	}
13534cfd5779SVedang Patel }
13544cfd5779SVedang Patel 
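/* Illustrative result of setup_txtime(), with assumed intervals: for
 * entries of 300 us, 300 us and 400 us and a base time B, next_txtime
 * becomes B, B + 300 us and B + 600 us respectively, i.e. the offset of
 * each entry from the start of the cycle.
 */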
13559c66d156SVinicius Costa Gomes static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
13569c66d156SVinicius Costa Gomes {
13579c66d156SVinicius Costa Gomes 	struct __tc_taprio_qopt_offload *__offload;
13589c66d156SVinicius Costa Gomes 
135911a33de2SGustavo A. R. Silva 	__offload = kzalloc(struct_size(__offload, offload.entries, num_entries),
136011a33de2SGustavo A. R. Silva 			    GFP_KERNEL);
13619c66d156SVinicius Costa Gomes 	if (!__offload)
13629c66d156SVinicius Costa Gomes 		return NULL;
13639c66d156SVinicius Costa Gomes 
13649c66d156SVinicius Costa Gomes 	refcount_set(&__offload->users, 1);
13659c66d156SVinicius Costa Gomes 
13669c66d156SVinicius Costa Gomes 	return &__offload->offload;
13679c66d156SVinicius Costa Gomes }
13689c66d156SVinicius Costa Gomes 
13699c66d156SVinicius Costa Gomes struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
13709c66d156SVinicius Costa Gomes 						  *offload)
13719c66d156SVinicius Costa Gomes {
13729c66d156SVinicius Costa Gomes 	struct __tc_taprio_qopt_offload *__offload;
13739c66d156SVinicius Costa Gomes 
13749c66d156SVinicius Costa Gomes 	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
13759c66d156SVinicius Costa Gomes 				 offload);
13769c66d156SVinicius Costa Gomes 
13779c66d156SVinicius Costa Gomes 	refcount_inc(&__offload->users);
13789c66d156SVinicius Costa Gomes 
13799c66d156SVinicius Costa Gomes 	return offload;
13809c66d156SVinicius Costa Gomes }
13819c66d156SVinicius Costa Gomes EXPORT_SYMBOL_GPL(taprio_offload_get);
13829c66d156SVinicius Costa Gomes 
13839c66d156SVinicius Costa Gomes void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
13849c66d156SVinicius Costa Gomes {
13859c66d156SVinicius Costa Gomes 	struct __tc_taprio_qopt_offload *__offload;
13869c66d156SVinicius Costa Gomes 
13879c66d156SVinicius Costa Gomes 	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
13889c66d156SVinicius Costa Gomes 				 offload);
13899c66d156SVinicius Costa Gomes 
13909c66d156SVinicius Costa Gomes 	if (!refcount_dec_and_test(&__offload->users))
13919c66d156SVinicius Costa Gomes 		return;
13929c66d156SVinicius Costa Gomes 
13939c66d156SVinicius Costa Gomes 	kfree(__offload);
13949c66d156SVinicius Costa Gomes }
13959c66d156SVinicius Costa Gomes EXPORT_SYMBOL_GPL(taprio_offload_free);
13969c66d156SVinicius Costa Gomes 
13979c66d156SVinicius Costa Gomes /* This function only serves to keep the pointers to the "oper" and "admin"
13989c66d156SVinicius Costa Gomes  * schedules valid in relation to their base times, so that a dump() caller
13999c66d156SVinicius Costa Gomes  * looks at the right schedules.
14009c66d156SVinicius Costa Gomes  * When using full offload, the admin configuration is promoted to oper at the
14019c66d156SVinicius Costa Gomes  * base_time in the PHC time domain.  But because the system time is not
14029c66d156SVinicius Costa Gomes  * necessarily in sync with that, we can't just trigger a hrtimer to call
14039c66d156SVinicius Costa Gomes  * switch_schedules at the right hardware time.
14049c66d156SVinicius Costa Gomes  * At the moment we call this by hand right away from taprio, but in the future
14059c66d156SVinicius Costa Gomes  * it will be useful to create a mechanism for drivers to notify taprio of the
14069c66d156SVinicius Costa Gomes  * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
14079c66d156SVinicius Costa Gomes  * This is left as TODO.
14089c66d156SVinicius Costa Gomes  */
1409d665c128SYi Wang static void taprio_offload_config_changed(struct taprio_sched *q)
14109c66d156SVinicius Costa Gomes {
14119c66d156SVinicius Costa Gomes 	struct sched_gate_list *oper, *admin;
14129c66d156SVinicius Costa Gomes 
1413c8cbe123SVladimir Oltean 	oper = rtnl_dereference(q->oper_sched);
1414c8cbe123SVladimir Oltean 	admin = rtnl_dereference(q->admin_sched);
14159c66d156SVinicius Costa Gomes 
14169c66d156SVinicius Costa Gomes 	switch_schedules(q, &admin, &oper);
14179c66d156SVinicius Costa Gomes }
14189c66d156SVinicius Costa Gomes 
141909e31cf0SVinicius Costa Gomes static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
142009e31cf0SVinicius Costa Gomes {
142109e31cf0SVinicius Costa Gomes 	u32 i, queue_mask = 0;
142209e31cf0SVinicius Costa Gomes 
142309e31cf0SVinicius Costa Gomes 	for (i = 0; i < dev->num_tc; i++) {
142409e31cf0SVinicius Costa Gomes 		u32 offset, count;
142509e31cf0SVinicius Costa Gomes 
142609e31cf0SVinicius Costa Gomes 		if (!(tc_mask & BIT(i)))
142709e31cf0SVinicius Costa Gomes 			continue;
142809e31cf0SVinicius Costa Gomes 
142909e31cf0SVinicius Costa Gomes 		offset = dev->tc_to_txq[i].offset;
143009e31cf0SVinicius Costa Gomes 		count = dev->tc_to_txq[i].count;
143109e31cf0SVinicius Costa Gomes 
143209e31cf0SVinicius Costa Gomes 		queue_mask |= GENMASK(offset + count - 1, offset);
143309e31cf0SVinicius Costa Gomes 	}
143409e31cf0SVinicius Costa Gomes 
143509e31cf0SVinicius Costa Gomes 	return queue_mask;
143609e31cf0SVinicius Costa Gomes }
143709e31cf0SVinicius Costa Gomes 
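/* Worked example for tc_map_to_queue_mask(), using the hypothetical
 * "queues 1@0 1@1 2@2" mapping: TC 0 covers TXQ 0, TC 1 covers TXQ 1 and
 * TC 2 covers TXQs 2-3. A tc_mask of 0b101 (TCs 0 and 2 open) therefore
 * yields queue_mask 0b1101 (TXQs 0, 2 and 3).
 */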
143809e31cf0SVinicius Costa Gomes static void taprio_sched_to_offload(struct net_device *dev,
14399c66d156SVinicius Costa Gomes 				    struct sched_gate_list *sched,
1440522d15eaSVladimir Oltean 				    struct tc_taprio_qopt_offload *offload,
1441522d15eaSVladimir Oltean 				    const struct tc_taprio_caps *caps)
14429c66d156SVinicius Costa Gomes {
14439c66d156SVinicius Costa Gomes 	struct sched_entry *entry;
14449c66d156SVinicius Costa Gomes 	int i = 0;
14459c66d156SVinicius Costa Gomes 
14469c66d156SVinicius Costa Gomes 	offload->base_time = sched->base_time;
14479c66d156SVinicius Costa Gomes 	offload->cycle_time = sched->cycle_time;
14489c66d156SVinicius Costa Gomes 	offload->cycle_time_extension = sched->cycle_time_extension;
14499c66d156SVinicius Costa Gomes 
14509c66d156SVinicius Costa Gomes 	list_for_each_entry(entry, &sched->entries, list) {
14519c66d156SVinicius Costa Gomes 		struct tc_taprio_sched_entry *e = &offload->entries[i];
14529c66d156SVinicius Costa Gomes 
14539c66d156SVinicius Costa Gomes 		e->command = entry->command;
14549c66d156SVinicius Costa Gomes 		e->interval = entry->interval;
1455522d15eaSVladimir Oltean 		if (caps->gate_mask_per_txq)
1456522d15eaSVladimir Oltean 			e->gate_mask = tc_map_to_queue_mask(dev,
1457522d15eaSVladimir Oltean 							    entry->gate_mask);
1458522d15eaSVladimir Oltean 		else
1459522d15eaSVladimir Oltean 			e->gate_mask = entry->gate_mask;
146009e31cf0SVinicius Costa Gomes 
14619c66d156SVinicius Costa Gomes 		i++;
14629c66d156SVinicius Costa Gomes 	}
14639c66d156SVinicius Costa Gomes 
14649c66d156SVinicius Costa Gomes 	offload->num_entries = i;
14659c66d156SVinicius Costa Gomes }
14669c66d156SVinicius Costa Gomes 
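/* Example of the gate_mask translation above, with the same hypothetical
 * "queues 1@0 1@1 2@2" mapping: for an entry opening TCs 0 and 2 (0b101),
 * a driver that sets caps->gate_mask_per_txq receives the per-TXQ mask
 * 0b1101, while one that does not receives the per-TC 0b101 unchanged.
 */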
14672f530df7SVladimir Oltean static void taprio_detect_broken_mqprio(struct taprio_sched *q)
14682f530df7SVladimir Oltean {
14692f530df7SVladimir Oltean 	struct net_device *dev = qdisc_dev(q->root);
14702f530df7SVladimir Oltean 	struct tc_taprio_caps caps;
14712f530df7SVladimir Oltean 
14722f530df7SVladimir Oltean 	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
14732f530df7SVladimir Oltean 				 &caps, sizeof(caps));
14742f530df7SVladimir Oltean 
14752f530df7SVladimir Oltean 	q->broken_mqprio = caps.broken_mqprio;
14762f530df7SVladimir Oltean 	if (q->broken_mqprio)
14772f530df7SVladimir Oltean 		static_branch_inc(&taprio_have_broken_mqprio);
14782f530df7SVladimir Oltean 	else
14792f530df7SVladimir Oltean 		static_branch_inc(&taprio_have_working_mqprio);
14802f530df7SVladimir Oltean 
14812f530df7SVladimir Oltean 	q->detected_mqprio = true;
14822f530df7SVladimir Oltean }
14832f530df7SVladimir Oltean 
14842f530df7SVladimir Oltean static void taprio_cleanup_broken_mqprio(struct taprio_sched *q)
14852f530df7SVladimir Oltean {
14862f530df7SVladimir Oltean 	if (!q->detected_mqprio)
14872f530df7SVladimir Oltean 		return;
14882f530df7SVladimir Oltean 
14892f530df7SVladimir Oltean 	if (q->broken_mqprio)
14902f530df7SVladimir Oltean 		static_branch_dec(&taprio_have_broken_mqprio);
14912f530df7SVladimir Oltean 	else
14922f530df7SVladimir Oltean 		static_branch_dec(&taprio_have_working_mqprio);
14932f530df7SVladimir Oltean }
14942f530df7SVladimir Oltean 
14959c66d156SVinicius Costa Gomes static int taprio_enable_offload(struct net_device *dev,
14969c66d156SVinicius Costa Gomes 				 struct taprio_sched *q,
14979c66d156SVinicius Costa Gomes 				 struct sched_gate_list *sched,
14989c66d156SVinicius Costa Gomes 				 struct netlink_ext_ack *extack)
14999c66d156SVinicius Costa Gomes {
15009c66d156SVinicius Costa Gomes 	const struct net_device_ops *ops = dev->netdev_ops;
15019c66d156SVinicius Costa Gomes 	struct tc_taprio_qopt_offload *offload;
1502a54fc09eSVladimir Oltean 	struct tc_taprio_caps caps;
1503a54fc09eSVladimir Oltean 	int tc, err = 0;
15049c66d156SVinicius Costa Gomes 
15059c66d156SVinicius Costa Gomes 	if (!ops->ndo_setup_tc) {
15069c66d156SVinicius Costa Gomes 		NL_SET_ERR_MSG(extack,
15079c66d156SVinicius Costa Gomes 			       "Device does not support taprio offload");
15089c66d156SVinicius Costa Gomes 		return -EOPNOTSUPP;
15099c66d156SVinicius Costa Gomes 	}
15109c66d156SVinicius Costa Gomes 
1511a54fc09eSVladimir Oltean 	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
1512a54fc09eSVladimir Oltean 				 &caps, sizeof(caps));
1513a54fc09eSVladimir Oltean 
1514a54fc09eSVladimir Oltean 	if (!caps.supports_queue_max_sdu) {
1515a54fc09eSVladimir Oltean 		for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
1516a54fc09eSVladimir Oltean 			if (q->max_sdu[tc]) {
1517a54fc09eSVladimir Oltean 				NL_SET_ERR_MSG_MOD(extack,
1518a54fc09eSVladimir Oltean 						   "Device does not handle queueMaxSDU");
1519a54fc09eSVladimir Oltean 				return -EOPNOTSUPP;
1520a54fc09eSVladimir Oltean 			}
1521a54fc09eSVladimir Oltean 		}
1522a54fc09eSVladimir Oltean 	}
1523a54fc09eSVladimir Oltean 
15249c66d156SVinicius Costa Gomes 	offload = taprio_offload_alloc(sched->num_entries);
15259c66d156SVinicius Costa Gomes 	if (!offload) {
15269c66d156SVinicius Costa Gomes 		NL_SET_ERR_MSG(extack,
15279c66d156SVinicius Costa Gomes 			       "Not enough memory for enabling offload mode");
15289c66d156SVinicius Costa Gomes 		return -ENOMEM;
15299c66d156SVinicius Costa Gomes 	}
15302d800bc5SVladimir Oltean 	offload->cmd = TAPRIO_CMD_REPLACE;
1531c54876cdSVladimir Oltean 	offload->extack = extack;
153209c794c0SVladimir Oltean 	mqprio_qopt_reconstruct(dev, &offload->mqprio.qopt);
1533c54876cdSVladimir Oltean 	offload->mqprio.extack = extack;
1534522d15eaSVladimir Oltean 	taprio_sched_to_offload(dev, sched, offload, &caps);
1535a721c3e5SVladimir Oltean 	mqprio_fp_to_offload(q->fp, &offload->mqprio);
15369c66d156SVinicius Costa Gomes 
1537a54fc09eSVladimir Oltean 	for (tc = 0; tc < TC_MAX_QUEUE; tc++)
1538a54fc09eSVladimir Oltean 		offload->max_sdu[tc] = q->max_sdu[tc];
1539a54fc09eSVladimir Oltean 
15409c66d156SVinicius Costa Gomes 	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
15419c66d156SVinicius Costa Gomes 	if (err < 0) {
1542c54876cdSVladimir Oltean 		NL_SET_ERR_MSG_WEAK(extack,
15439c66d156SVinicius Costa Gomes 				    "Device failed to setup taprio offload");
15449c66d156SVinicius Costa Gomes 		goto done;
15459c66d156SVinicius Costa Gomes 	}
15469c66d156SVinicius Costa Gomes 
1547db46e3a8SVladimir Oltean 	q->offloaded = true;
1548db46e3a8SVladimir Oltean 
15499c66d156SVinicius Costa Gomes done:
1550c54876cdSVladimir Oltean 	/* The offload structure may linger around via a reference taken by the
1551c54876cdSVladimir Oltean 	 * device driver, so clear up the netlink extack pointer so that the
1552c54876cdSVladimir Oltean 	 * driver isn't tempted to dereference data which stopped being valid
1553c54876cdSVladimir Oltean 	 */
1554c54876cdSVladimir Oltean 	offload->extack = NULL;
1555c54876cdSVladimir Oltean 	offload->mqprio.extack = NULL;
15569c66d156SVinicius Costa Gomes 	taprio_offload_free(offload);
15579c66d156SVinicius Costa Gomes 
15589c66d156SVinicius Costa Gomes 	return err;
15599c66d156SVinicius Costa Gomes }
15609c66d156SVinicius Costa Gomes 
15619c66d156SVinicius Costa Gomes static int taprio_disable_offload(struct net_device *dev,
15629c66d156SVinicius Costa Gomes 				  struct taprio_sched *q,
15639c66d156SVinicius Costa Gomes 				  struct netlink_ext_ack *extack)
15649c66d156SVinicius Costa Gomes {
15659c66d156SVinicius Costa Gomes 	const struct net_device_ops *ops = dev->netdev_ops;
15669c66d156SVinicius Costa Gomes 	struct tc_taprio_qopt_offload *offload;
15679c66d156SVinicius Costa Gomes 	int err;
15689c66d156SVinicius Costa Gomes 
1569db46e3a8SVladimir Oltean 	if (!q->offloaded)
15709c66d156SVinicius Costa Gomes 		return 0;
15719c66d156SVinicius Costa Gomes 
15729c66d156SVinicius Costa Gomes 	offload = taprio_offload_alloc(0);
15739c66d156SVinicius Costa Gomes 	if (!offload) {
15749c66d156SVinicius Costa Gomes 		NL_SET_ERR_MSG(extack,
15759c66d156SVinicius Costa Gomes 			       "Not enough memory to disable offload mode");
15769c66d156SVinicius Costa Gomes 		return -ENOMEM;
15779c66d156SVinicius Costa Gomes 	}
15782d800bc5SVladimir Oltean 	offload->cmd = TAPRIO_CMD_DESTROY;
15799c66d156SVinicius Costa Gomes 
15809c66d156SVinicius Costa Gomes 	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
15819c66d156SVinicius Costa Gomes 	if (err < 0) {
15829c66d156SVinicius Costa Gomes 		NL_SET_ERR_MSG(extack,
15839c66d156SVinicius Costa Gomes 			       "Device failed to disable offload");
15849c66d156SVinicius Costa Gomes 		goto out;
15859c66d156SVinicius Costa Gomes 	}
15869c66d156SVinicius Costa Gomes 
1587db46e3a8SVladimir Oltean 	q->offloaded = false;
1588db46e3a8SVladimir Oltean 
15899c66d156SVinicius Costa Gomes out:
15909c66d156SVinicius Costa Gomes 	taprio_offload_free(offload);
15919c66d156SVinicius Costa Gomes 
15929c66d156SVinicius Costa Gomes 	return err;
15939c66d156SVinicius Costa Gomes }
15949c66d156SVinicius Costa Gomes 
15959c66d156SVinicius Costa Gomes /* If full offload is enabled, the only possible clockid is the net device's
15969c66d156SVinicius Costa Gomes  * PHC. For that reason, specifying a clockid through netlink is incorrect.
15979c66d156SVinicius Costa Gomes  * For txtime-assist, it is implicitly assumed that the device's PHC is kept
15989c66d156SVinicius Costa Gomes  * in sync with the specified clockid via a user space daemon such as phc2sys.
15999c66d156SVinicius Costa Gomes  * For both software taprio and txtime-assist, the clockid is used for the
16009c66d156SVinicius Costa Gomes  * hrtimer that advances the schedule, and is hence mandatory.
16019c66d156SVinicius Costa Gomes  */
16029c66d156SVinicius Costa Gomes static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
16039c66d156SVinicius Costa Gomes 				struct netlink_ext_ack *extack)
16049c66d156SVinicius Costa Gomes {
16059c66d156SVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
16069c66d156SVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
16079c66d156SVinicius Costa Gomes 	int err = -EINVAL;
16089c66d156SVinicius Costa Gomes 
16099c66d156SVinicius Costa Gomes 	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
16109c66d156SVinicius Costa Gomes 		const struct ethtool_ops *ops = dev->ethtool_ops;
16119c66d156SVinicius Costa Gomes 		struct ethtool_ts_info info = {
16129c66d156SVinicius Costa Gomes 			.cmd = ETHTOOL_GET_TS_INFO,
16139c66d156SVinicius Costa Gomes 			.phc_index = -1,
16149c66d156SVinicius Costa Gomes 		};
16159c66d156SVinicius Costa Gomes 
16169c66d156SVinicius Costa Gomes 		if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
16179c66d156SVinicius Costa Gomes 			NL_SET_ERR_MSG(extack,
16189c66d156SVinicius Costa Gomes 				       "The 'clockid' cannot be specified for full offload");
16199c66d156SVinicius Costa Gomes 			goto out;
16209c66d156SVinicius Costa Gomes 		}
16219c66d156SVinicius Costa Gomes 
16229c66d156SVinicius Costa Gomes 		if (ops && ops->get_ts_info)
16239c66d156SVinicius Costa Gomes 			err = ops->get_ts_info(dev, &info);
16249c66d156SVinicius Costa Gomes 
16259c66d156SVinicius Costa Gomes 		if (err || info.phc_index < 0) {
16269c66d156SVinicius Costa Gomes 			NL_SET_ERR_MSG(extack,
16279c66d156SVinicius Costa Gomes 				       "Device does not have a PTP clock");
16289c66d156SVinicius Costa Gomes 			err = -ENOTSUPP;
16299c66d156SVinicius Costa Gomes 			goto out;
16309c66d156SVinicius Costa Gomes 		}
16319c66d156SVinicius Costa Gomes 	} else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
16329c66d156SVinicius Costa Gomes 		int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
16336dc25401SEric Dumazet 		enum tk_offsets tk_offset;
16349c66d156SVinicius Costa Gomes 
16359c66d156SVinicius Costa Gomes 		/* We only support static clockids and we don't allow
16369c66d156SVinicius Costa Gomes 		/* We only support static clockids, and we don't allow
16379c66d156SVinicius Costa Gomes 		 * the clockid to be modified after the first init.
16389c66d156SVinicius Costa Gomes 		if (clockid < 0 ||
16399c66d156SVinicius Costa Gomes 		    (q->clockid != -1 && q->clockid != clockid)) {
16409c66d156SVinicius Costa Gomes 			NL_SET_ERR_MSG(extack,
16419c66d156SVinicius Costa Gomes 				       "Changing the 'clockid' of a running schedule is not supported");
16429c66d156SVinicius Costa Gomes 			err = -ENOTSUPP;
16439c66d156SVinicius Costa Gomes 			goto out;
16449c66d156SVinicius Costa Gomes 		}
16459c66d156SVinicius Costa Gomes 
16469c66d156SVinicius Costa Gomes 		switch (clockid) {
16479c66d156SVinicius Costa Gomes 		case CLOCK_REALTIME:
16486dc25401SEric Dumazet 			tk_offset = TK_OFFS_REAL;
16499c66d156SVinicius Costa Gomes 			break;
16509c66d156SVinicius Costa Gomes 		case CLOCK_MONOTONIC:
16516dc25401SEric Dumazet 			tk_offset = TK_OFFS_MAX;
16529c66d156SVinicius Costa Gomes 			break;
16539c66d156SVinicius Costa Gomes 		case CLOCK_BOOTTIME:
16546dc25401SEric Dumazet 			tk_offset = TK_OFFS_BOOT;
16559c66d156SVinicius Costa Gomes 			break;
16569c66d156SVinicius Costa Gomes 		case CLOCK_TAI:
16576dc25401SEric Dumazet 			tk_offset = TK_OFFS_TAI;
16589c66d156SVinicius Costa Gomes 			break;
16599c66d156SVinicius Costa Gomes 		default:
16609c66d156SVinicius Costa Gomes 			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
16619c66d156SVinicius Costa Gomes 			err = -EINVAL;
16629c66d156SVinicius Costa Gomes 			goto out;
16639c66d156SVinicius Costa Gomes 		}
16646dc25401SEric Dumazet 		/* This pairs with READ_ONCE() in taprio_mono_to_any */
16656dc25401SEric Dumazet 		WRITE_ONCE(q->tk_offset, tk_offset);
16669c66d156SVinicius Costa Gomes 
16679c66d156SVinicius Costa Gomes 		q->clockid = clockid;
16689c66d156SVinicius Costa Gomes 	} else {
16699c66d156SVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
16709c66d156SVinicius Costa Gomes 		goto out;
16719c66d156SVinicius Costa Gomes 	}
1672a954380aSVinicius Costa Gomes 
1673a954380aSVinicius Costa Gomes 	/* Everything went ok, return success. */
1674a954380aSVinicius Costa Gomes 	err = 0;
1675a954380aSVinicius Costa Gomes 
16769c66d156SVinicius Costa Gomes out:
16779c66d156SVinicius Costa Gomes 	return err;
16789c66d156SVinicius Costa Gomes }
16799c66d156SVinicius Costa Gomes 
1680a54fc09eSVladimir Oltean static int taprio_parse_tc_entry(struct Qdisc *sch,
1681a54fc09eSVladimir Oltean 				 struct nlattr *opt,
1682a54fc09eSVladimir Oltean 				 u32 max_sdu[TC_QOPT_MAX_QUEUE],
1683a721c3e5SVladimir Oltean 				 u32 fp[TC_QOPT_MAX_QUEUE],
1684a54fc09eSVladimir Oltean 				 unsigned long *seen_tcs,
1685a54fc09eSVladimir Oltean 				 struct netlink_ext_ack *extack)
1686a54fc09eSVladimir Oltean {
1687a54fc09eSVladimir Oltean 	struct nlattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { };
1688a54fc09eSVladimir Oltean 	struct net_device *dev = qdisc_dev(sch);
1689a54fc09eSVladimir Oltean 	int err, tc;
1690a721c3e5SVladimir Oltean 	u32 val;
1691a54fc09eSVladimir Oltean 
1692a54fc09eSVladimir Oltean 	err = nla_parse_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, opt,
1693a54fc09eSVladimir Oltean 			       taprio_tc_policy, extack);
1694a54fc09eSVladimir Oltean 	if (err < 0)
1695a54fc09eSVladimir Oltean 		return err;
1696a54fc09eSVladimir Oltean 
1697a54fc09eSVladimir Oltean 	if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) {
1698a54fc09eSVladimir Oltean 		NL_SET_ERR_MSG_MOD(extack, "TC entry index missing");
1699a54fc09eSVladimir Oltean 		return -EINVAL;
1700a54fc09eSVladimir Oltean 	}
1701a54fc09eSVladimir Oltean 
1702a54fc09eSVladimir Oltean 	tc = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]);
1703a54fc09eSVladimir Oltean 	if (tc >= TC_QOPT_MAX_QUEUE) {
1704a54fc09eSVladimir Oltean 		NL_SET_ERR_MSG_MOD(extack, "TC entry index out of range");
1705a54fc09eSVladimir Oltean 		return -ERANGE;
1706a54fc09eSVladimir Oltean 	}
1707a54fc09eSVladimir Oltean 
1708a54fc09eSVladimir Oltean 	if (*seen_tcs & BIT(tc)) {
1709a54fc09eSVladimir Oltean 		NL_SET_ERR_MSG_MOD(extack, "Duplicate TC entry");
1710a54fc09eSVladimir Oltean 		return -EINVAL;
1711a54fc09eSVladimir Oltean 	}
1712a54fc09eSVladimir Oltean 
1713a54fc09eSVladimir Oltean 	*seen_tcs |= BIT(tc);
1714a54fc09eSVladimir Oltean 
1715a721c3e5SVladimir Oltean 	if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]) {
1716a54fc09eSVladimir Oltean 		val = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]);
1717a54fc09eSVladimir Oltean 		if (val > dev->max_mtu) {
1718a54fc09eSVladimir Oltean 			NL_SET_ERR_MSG_MOD(extack, "TC max SDU exceeds device max MTU");
1719a54fc09eSVladimir Oltean 			return -ERANGE;
1720a54fc09eSVladimir Oltean 		}
1721a54fc09eSVladimir Oltean 
1722a54fc09eSVladimir Oltean 		max_sdu[tc] = val;
1723a721c3e5SVladimir Oltean 	}
1724a721c3e5SVladimir Oltean 
1725a721c3e5SVladimir Oltean 	if (tb[TCA_TAPRIO_TC_ENTRY_FP])
1726a721c3e5SVladimir Oltean 		fp[tc] = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_FP]);
1727a54fc09eSVladimir Oltean 
1728a54fc09eSVladimir Oltean 	return 0;
1729a54fc09eSVladimir Oltean }
1730a54fc09eSVladimir Oltean 
1731a54fc09eSVladimir Oltean static int taprio_parse_tc_entries(struct Qdisc *sch,
1732a54fc09eSVladimir Oltean 				   struct nlattr *opt,
1733a54fc09eSVladimir Oltean 				   struct netlink_ext_ack *extack)
1734a54fc09eSVladimir Oltean {
1735a54fc09eSVladimir Oltean 	struct taprio_sched *q = qdisc_priv(sch);
1736a721c3e5SVladimir Oltean 	struct net_device *dev = qdisc_dev(sch);
1737a54fc09eSVladimir Oltean 	u32 max_sdu[TC_QOPT_MAX_QUEUE];
1738a721c3e5SVladimir Oltean 	bool have_preemption = false;
1739a54fc09eSVladimir Oltean 	unsigned long seen_tcs = 0;
1740a721c3e5SVladimir Oltean 	u32 fp[TC_QOPT_MAX_QUEUE];
1741a54fc09eSVladimir Oltean 	struct nlattr *n;
1742a54fc09eSVladimir Oltean 	int tc, rem;
1743a54fc09eSVladimir Oltean 	int err = 0;
1744a54fc09eSVladimir Oltean 
1745a721c3e5SVladimir Oltean 	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
1746a54fc09eSVladimir Oltean 		max_sdu[tc] = q->max_sdu[tc];
1747a721c3e5SVladimir Oltean 		fp[tc] = q->fp[tc];
1748a721c3e5SVladimir Oltean 	}
1749a54fc09eSVladimir Oltean 
1750a54fc09eSVladimir Oltean 	nla_for_each_nested(n, opt, rem) {
1751a54fc09eSVladimir Oltean 		if (nla_type(n) != TCA_TAPRIO_ATTR_TC_ENTRY)
1752a54fc09eSVladimir Oltean 			continue;
1753a54fc09eSVladimir Oltean 
1754a721c3e5SVladimir Oltean 		err = taprio_parse_tc_entry(sch, n, max_sdu, fp, &seen_tcs,
1755fed87cc6SVladimir Oltean 					    extack);
1756a54fc09eSVladimir Oltean 		if (err)
1757a721c3e5SVladimir Oltean 			return err;
1758a54fc09eSVladimir Oltean 	}
1759a54fc09eSVladimir Oltean 
1760a721c3e5SVladimir Oltean 	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
1761a54fc09eSVladimir Oltean 		q->max_sdu[tc] = max_sdu[tc];
1762a721c3e5SVladimir Oltean 		q->fp[tc] = fp[tc];
1763a721c3e5SVladimir Oltean 		if (fp[tc] != TC_FP_EXPRESS)
1764a721c3e5SVladimir Oltean 			have_preemption = true;
1765a721c3e5SVladimir Oltean 	}
1766a54fc09eSVladimir Oltean 
1767a721c3e5SVladimir Oltean 	if (have_preemption) {
1768a721c3e5SVladimir Oltean 		if (!FULL_OFFLOAD_IS_ENABLED(q->flags)) {
1769a721c3e5SVladimir Oltean 			NL_SET_ERR_MSG(extack,
1770a721c3e5SVladimir Oltean 				       "Preemption only supported with full offload");
1771a721c3e5SVladimir Oltean 			return -EOPNOTSUPP;
1772a721c3e5SVladimir Oltean 		}
1773a721c3e5SVladimir Oltean 
1774a721c3e5SVladimir Oltean 		if (!ethtool_dev_mm_supported(dev)) {
1775a721c3e5SVladimir Oltean 			NL_SET_ERR_MSG(extack,
1776a721c3e5SVladimir Oltean 				       "Device does not support preemption");
1777a721c3e5SVladimir Oltean 			return -EOPNOTSUPP;
1778a721c3e5SVladimir Oltean 		}
1779a721c3e5SVladimir Oltean 	}
1780a721c3e5SVladimir Oltean 
1781a54fc09eSVladimir Oltean 	return err;
1782a54fc09eSVladimir Oltean }
1783a54fc09eSVladimir Oltean 
1784b5a0faa3SIvan Khoronzhuk static int taprio_mqprio_cmp(const struct net_device *dev,
1785b5a0faa3SIvan Khoronzhuk 			     const struct tc_mqprio_qopt *mqprio)
1786b5a0faa3SIvan Khoronzhuk {
1787b5a0faa3SIvan Khoronzhuk 	int i;
1788b5a0faa3SIvan Khoronzhuk 
1789b5a0faa3SIvan Khoronzhuk 	if (!mqprio || mqprio->num_tc != dev->num_tc)
1790b5a0faa3SIvan Khoronzhuk 		return -1;
1791b5a0faa3SIvan Khoronzhuk 
1792b5a0faa3SIvan Khoronzhuk 	for (i = 0; i < mqprio->num_tc; i++)
1793b5a0faa3SIvan Khoronzhuk 		if (dev->tc_to_txq[i].count != mqprio->count[i] ||
1794b5a0faa3SIvan Khoronzhuk 		    dev->tc_to_txq[i].offset != mqprio->offset[i])
1795b5a0faa3SIvan Khoronzhuk 			return -1;
1796b5a0faa3SIvan Khoronzhuk 
1797b5a0faa3SIvan Khoronzhuk 	for (i = 0; i <= TC_BITMASK; i++)
1798b5a0faa3SIvan Khoronzhuk 		if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
1799b5a0faa3SIvan Khoronzhuk 			return -1;
1800b5a0faa3SIvan Khoronzhuk 
1801b5a0faa3SIvan Khoronzhuk 	return 0;
1802b5a0faa3SIvan Khoronzhuk }
1803b5a0faa3SIvan Khoronzhuk 
1804a9d62274SVinicius Costa Gomes /* The semantics of the 'flags' argument in relation to 'change()'
1805a9d62274SVinicius Costa Gomes  * requests, are interpreted following two rules (which are applied in
1806a9d62274SVinicius Costa Gomes  * this order): (1) an omitted 'flags' argument is interpreted as
1807a9d62274SVinicius Costa Gomes  * zero; (2) the 'flags' of a "running" taprio instance cannot be
1808a9d62274SVinicius Costa Gomes  * changed.
1809a9d62274SVinicius Costa Gomes  */
1810a9d62274SVinicius Costa Gomes static int taprio_new_flags(const struct nlattr *attr, u32 old,
1811a9d62274SVinicius Costa Gomes 			    struct netlink_ext_ack *extack)
1812a9d62274SVinicius Costa Gomes {
1813a9d62274SVinicius Costa Gomes 	u32 new = 0;
1814a9d62274SVinicius Costa Gomes 
1815a9d62274SVinicius Costa Gomes 	if (attr)
1816a9d62274SVinicius Costa Gomes 		new = nla_get_u32(attr);
1817a9d62274SVinicius Costa Gomes 
1818a9d62274SVinicius Costa Gomes 	if (old != TAPRIO_FLAGS_INVALID && old != new) {
1819a9d62274SVinicius Costa Gomes 		NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
1820a9d62274SVinicius Costa Gomes 		return -EOPNOTSUPP;
1821a9d62274SVinicius Costa Gomes 	}
1822a9d62274SVinicius Costa Gomes 
1823a9d62274SVinicius Costa Gomes 	if (!taprio_flags_valid(new)) {
1824a9d62274SVinicius Costa Gomes 		NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
1825a9d62274SVinicius Costa Gomes 		return -EINVAL;
1826a9d62274SVinicius Costa Gomes 	}
1827a9d62274SVinicius Costa Gomes 
1828a9d62274SVinicius Costa Gomes 	return new;
1829a9d62274SVinicius Costa Gomes }
1830a9d62274SVinicius Costa Gomes 
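/* Example of the flag rules above (flag values as defined in pkt_sched.h):
 * a first change() with flags 0x1 (TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
 * succeeds; a later change() that omits 'flags' is treated as flags 0,
 * which differs from the stored value, so taprio_new_flags() returns
 * -EOPNOTSUPP rather than silently switching modes.
 */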
18315a781ccbSVinicius Costa Gomes static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
18325a781ccbSVinicius Costa Gomes 			 struct netlink_ext_ack *extack)
18335a781ccbSVinicius Costa Gomes {
1834a3d91b2cSVladimir Oltean 	struct qdisc_size_table *stab = rtnl_dereference(sch->stab);
18355a781ccbSVinicius Costa Gomes 	struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
1836a3d43c0dSVinicius Costa Gomes 	struct sched_gate_list *oper, *admin, *new_admin;
18375a781ccbSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
18385a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
18395a781ccbSVinicius Costa Gomes 	struct tc_mqprio_qopt *mqprio = NULL;
1840a3d43c0dSVinicius Costa Gomes 	unsigned long flags;
18415a781ccbSVinicius Costa Gomes 	ktime_t start;
18429c66d156SVinicius Costa Gomes 	int i, err;
18435a781ccbSVinicius Costa Gomes 
18448cb08174SJohannes Berg 	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
18455a781ccbSVinicius Costa Gomes 					  taprio_policy, extack);
18465a781ccbSVinicius Costa Gomes 	if (err < 0)
18475a781ccbSVinicius Costa Gomes 		return err;
18485a781ccbSVinicius Costa Gomes 
18495a781ccbSVinicius Costa Gomes 	if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
18505a781ccbSVinicius Costa Gomes 		mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
18515a781ccbSVinicius Costa Gomes 
1852a9d62274SVinicius Costa Gomes 	err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
1853a9d62274SVinicius Costa Gomes 			       q->flags, extack);
1854a9d62274SVinicius Costa Gomes 	if (err < 0)
1855a9d62274SVinicius Costa Gomes 		return err;
18564cfd5779SVedang Patel 
1857a9d62274SVinicius Costa Gomes 	q->flags = err;
18584cfd5779SVedang Patel 
1859a9d62274SVinicius Costa Gomes 	err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
18605a781ccbSVinicius Costa Gomes 	if (err < 0)
18615a781ccbSVinicius Costa Gomes 		return err;
18625a781ccbSVinicius Costa Gomes 
1863a54fc09eSVladimir Oltean 	err = taprio_parse_tc_entries(sch, opt, extack);
1864a54fc09eSVladimir Oltean 	if (err)
1865a54fc09eSVladimir Oltean 		return err;
1866a54fc09eSVladimir Oltean 
1867a3d43c0dSVinicius Costa Gomes 	new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
1868a3d43c0dSVinicius Costa Gomes 	if (!new_admin) {
1869a3d43c0dSVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
1870a3d43c0dSVinicius Costa Gomes 		return -ENOMEM;
1871a3d43c0dSVinicius Costa Gomes 	}
1872a3d43c0dSVinicius Costa Gomes 	INIT_LIST_HEAD(&new_admin->entries);
18735a781ccbSVinicius Costa Gomes 
187418cdd2f0SVladimir Oltean 	oper = rtnl_dereference(q->oper_sched);
187518cdd2f0SVladimir Oltean 	admin = rtnl_dereference(q->admin_sched);
18765a781ccbSVinicius Costa Gomes 
1877b5a0faa3SIvan Khoronzhuk 	/* no changes - no new mqprio settings */
1878b5a0faa3SIvan Khoronzhuk 	if (!taprio_mqprio_cmp(dev, mqprio))
1879b5a0faa3SIvan Khoronzhuk 		mqprio = NULL;
1880b5a0faa3SIvan Khoronzhuk 
1881a3d43c0dSVinicius Costa Gomes 	if (mqprio && (oper || admin)) {
1882a3d43c0dSVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
1883a3d43c0dSVinicius Costa Gomes 		err = -ENOTSUPP;
1884a3d43c0dSVinicius Costa Gomes 		goto free_sched;
18855a781ccbSVinicius Costa Gomes 	}
18865a781ccbSVinicius Costa Gomes 
18875652e63dSVinicius Costa Gomes 	if (mqprio) {
1888efe487fcSHaimin Zhang 		err = netdev_set_num_tc(dev, mqprio->num_tc);
1889efe487fcSHaimin Zhang 		if (err)
1890efe487fcSHaimin Zhang 			goto free_sched;
18912f530df7SVladimir Oltean 		for (i = 0; i < mqprio->num_tc; i++) {
18925652e63dSVinicius Costa Gomes 			netdev_set_tc_queue(dev, i,
18935652e63dSVinicius Costa Gomes 					    mqprio->count[i],
18945652e63dSVinicius Costa Gomes 					    mqprio->offset[i]);
18952f530df7SVladimir Oltean 			q->cur_txq[i] = mqprio->offset[i];
18962f530df7SVladimir Oltean 		}
18975652e63dSVinicius Costa Gomes 
18985652e63dSVinicius Costa Gomes 		/* Always use supplied priority mappings */
18995652e63dSVinicius Costa Gomes 		for (i = 0; i <= TC_BITMASK; i++)
19005652e63dSVinicius Costa Gomes 			netdev_set_prio_tc_map(dev, i,
19015652e63dSVinicius Costa Gomes 					       mqprio->prio_tc_map[i]);
19025652e63dSVinicius Costa Gomes 	}
19035652e63dSVinicius Costa Gomes 
190409dbdf28SVladimir Oltean 	err = parse_taprio_schedule(q, tb, new_admin, extack);
190509dbdf28SVladimir Oltean 	if (err < 0)
190609dbdf28SVladimir Oltean 		goto free_sched;
190709dbdf28SVladimir Oltean 
190809dbdf28SVladimir Oltean 	if (new_admin->num_entries == 0) {
190909dbdf28SVladimir Oltean 		NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
191009dbdf28SVladimir Oltean 		err = -EINVAL;
191109dbdf28SVladimir Oltean 		goto free_sched;
191209dbdf28SVladimir Oltean 	}
191309dbdf28SVladimir Oltean 
191409dbdf28SVladimir Oltean 	err = taprio_parse_clockid(sch, tb, extack);
191509dbdf28SVladimir Oltean 	if (err < 0)
191609dbdf28SVladimir Oltean 		goto free_sched;
191709dbdf28SVladimir Oltean 
191809dbdf28SVladimir Oltean 	taprio_set_picos_per_byte(dev, q);
191909dbdf28SVladimir Oltean 	taprio_update_queue_max_sdu(q, new_admin, stab);
192009dbdf28SVladimir Oltean 
1921a9d62274SVinicius Costa Gomes 	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
192209e31cf0SVinicius Costa Gomes 		err = taprio_enable_offload(dev, q, new_admin, extack);
19239c66d156SVinicius Costa Gomes 	else
19249c66d156SVinicius Costa Gomes 		err = taprio_disable_offload(dev, q, extack);
19259c66d156SVinicius Costa Gomes 	if (err)
19269c66d156SVinicius Costa Gomes 		goto free_sched;
19279c66d156SVinicius Costa Gomes 
1928a3d43c0dSVinicius Costa Gomes 	/* Protects against enqueue()/dequeue() */
1929a3d43c0dSVinicius Costa Gomes 	spin_lock_bh(qdisc_lock(sch));
1930a3d43c0dSVinicius Costa Gomes 
19314cfd5779SVedang Patel 	if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
19324cfd5779SVedang Patel 		if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
19334cfd5779SVedang Patel 			NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
19344cfd5779SVedang Patel 			err = -EINVAL;
19354cfd5779SVedang Patel 			goto unlock;
19364cfd5779SVedang Patel 		}
19374cfd5779SVedang Patel 
1938a5b64700SVedang Patel 		q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
19394cfd5779SVedang Patel 	}
19404cfd5779SVedang Patel 
1941a9d62274SVinicius Costa Gomes 	if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
1942a9d62274SVinicius Costa Gomes 	    !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
19434cfd5779SVedang Patel 	    !hrtimer_active(&q->advance_timer)) {
1944a3d43c0dSVinicius Costa Gomes 		hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
1945a3d43c0dSVinicius Costa Gomes 		q->advance_timer.function = advance_sched;
19465a781ccbSVinicius Costa Gomes 	}
19475a781ccbSVinicius Costa Gomes 
1948a3d43c0dSVinicius Costa Gomes 	err = taprio_get_start_time(sch, new_admin, &start);
1949a3d43c0dSVinicius Costa Gomes 	if (err < 0) {
1950a3d43c0dSVinicius Costa Gomes 		NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
1951a3d43c0dSVinicius Costa Gomes 		goto unlock;
1952a3d43c0dSVinicius Costa Gomes 	}
19535a781ccbSVinicius Costa Gomes 
19544cfd5779SVedang Patel 	setup_txtime(q, new_admin, start);
19554cfd5779SVedang Patel 
1956bfabd41dSVinicius Costa Gomes 	if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
19574cfd5779SVedang Patel 		if (!oper) {
19584cfd5779SVedang Patel 			rcu_assign_pointer(q->oper_sched, new_admin);
19594cfd5779SVedang Patel 			err = 0;
19604cfd5779SVedang Patel 			new_admin = NULL;
19614cfd5779SVedang Patel 			goto unlock;
19624cfd5779SVedang Patel 		}
19634cfd5779SVedang Patel 
19644cfd5779SVedang Patel 		rcu_assign_pointer(q->admin_sched, new_admin);
19654cfd5779SVedang Patel 		if (admin)
19664cfd5779SVedang Patel 			call_rcu(&admin->rcu, taprio_free_sched_cb);
19674cfd5779SVedang Patel 	} else {
1968e5517551SVladimir Oltean 		setup_first_end_time(q, new_admin, start);
1969a3d43c0dSVinicius Costa Gomes 
1970a3d43c0dSVinicius Costa Gomes 		/* Protects against advance_sched() */
1971a3d43c0dSVinicius Costa Gomes 		spin_lock_irqsave(&q->current_entry_lock, flags);
1972a3d43c0dSVinicius Costa Gomes 
1973a3d43c0dSVinicius Costa Gomes 		taprio_start_sched(sch, start, new_admin);
1974a3d43c0dSVinicius Costa Gomes 
1975a3d43c0dSVinicius Costa Gomes 		rcu_assign_pointer(q->admin_sched, new_admin);
1976a3d43c0dSVinicius Costa Gomes 		if (admin)
1977a3d43c0dSVinicius Costa Gomes 			call_rcu(&admin->rcu, taprio_free_sched_cb);
1978a3d43c0dSVinicius Costa Gomes 
1979a3d43c0dSVinicius Costa Gomes 		spin_unlock_irqrestore(&q->current_entry_lock, flags);
19800763b3e8SIvan Khoronzhuk 
1981a9d62274SVinicius Costa Gomes 		if (FULL_OFFLOAD_IS_ENABLED(q->flags))
19820763b3e8SIvan Khoronzhuk 			taprio_offload_config_changed(q);
19834cfd5779SVedang Patel 	}
1984a3d43c0dSVinicius Costa Gomes 
19854cfd5779SVedang Patel 	new_admin = NULL;
1986a3d43c0dSVinicius Costa Gomes 	err = 0;
1987a3d43c0dSVinicius Costa Gomes 
1988a3d91b2cSVladimir Oltean 	if (!stab)
1989a3d91b2cSVladimir Oltean 		NL_SET_ERR_MSG_MOD(extack,
1990a3d91b2cSVladimir Oltean 				   "Size table not specified, frame length estimations may be inaccurate");
1991a3d91b2cSVladimir Oltean 
1992a3d43c0dSVinicius Costa Gomes unlock:
1993a3d43c0dSVinicius Costa Gomes 	spin_unlock_bh(qdisc_lock(sch));
1994a3d43c0dSVinicius Costa Gomes 
1995a3d43c0dSVinicius Costa Gomes free_sched:
199651650d33SIvan Khoronzhuk 	if (new_admin)
199751650d33SIvan Khoronzhuk 		call_rcu(&new_admin->rcu, taprio_free_sched_cb);
1998a3d43c0dSVinicius Costa Gomes 
1999a3d43c0dSVinicius Costa Gomes 	return err;
20005a781ccbSVinicius Costa Gomes }
20015a781ccbSVinicius Costa Gomes 
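/* Cancel the schedule-advance timer and reset every per-queue child qdisc
 * that taprio still owns.
 */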
200244d4775cSDavide Caratti static void taprio_reset(struct Qdisc *sch)
200344d4775cSDavide Caratti {
200444d4775cSDavide Caratti 	struct taprio_sched *q = qdisc_priv(sch);
200544d4775cSDavide Caratti 	struct net_device *dev = qdisc_dev(sch);
200644d4775cSDavide Caratti 	int i;
200744d4775cSDavide Caratti 
200844d4775cSDavide Caratti 	hrtimer_cancel(&q->advance_timer);
20093a415d59SEric Dumazet 
201044d4775cSDavide Caratti 	if (q->qdiscs) {
2011698285daSDavide Caratti 		for (i = 0; i < dev->num_tx_queues; i++)
2012698285daSDavide Caratti 			if (q->qdiscs[i])
201344d4775cSDavide Caratti 				qdisc_reset(q->qdiscs[i]);
201444d4775cSDavide Caratti 	}
201544d4775cSDavide Caratti }
201644d4775cSDavide Caratti 
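/* Teardown order matters: the advance timer must be stopped and in-flight
 * dequeues drained (qdisc_synchronize()) before the offload is torn down
 * and the children, the netdev traffic class config and the RCU-protected
 * schedules are released.
 */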
20175a781ccbSVinicius Costa Gomes static void taprio_destroy(struct Qdisc *sch)
20185a781ccbSVinicius Costa Gomes {
20195a781ccbSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
20205a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
20219af23657SVladimir Oltean 	struct sched_gate_list *oper, *admin;
20225a781ccbSVinicius Costa Gomes 	unsigned int i;
20235a781ccbSVinicius Costa Gomes 
20247b9eba7bSLeandro Dorileo 	list_del(&q->taprio_list);
20257b9eba7bSLeandro Dorileo 
2026a56d447fSEric Dumazet 	/* Note that taprio_reset() might not be called if an error
2027a56d447fSEric Dumazet 	 * happens in qdisc_create(), after taprio_init() has been called.
2028a56d447fSEric Dumazet 	 */
2029a56d447fSEric Dumazet 	hrtimer_cancel(&q->advance_timer);
20303a415d59SEric Dumazet 	qdisc_synchronize(sch);
20315a781ccbSVinicius Costa Gomes 
20329c66d156SVinicius Costa Gomes 	taprio_disable_offload(dev, q, NULL);
20339c66d156SVinicius Costa Gomes 
20345a781ccbSVinicius Costa Gomes 	if (q->qdiscs) {
2035698285daSDavide Caratti 		for (i = 0; i < dev->num_tx_queues; i++)
20365a781ccbSVinicius Costa Gomes 			qdisc_put(q->qdiscs[i]);
20375a781ccbSVinicius Costa Gomes 
20385a781ccbSVinicius Costa Gomes 		kfree(q->qdiscs);
20395a781ccbSVinicius Costa Gomes 	}
20405a781ccbSVinicius Costa Gomes 	q->qdiscs = NULL;
20415a781ccbSVinicius Costa Gomes 
20427c16680aSVinicius Costa Gomes 	netdev_reset_tc(dev);
20435a781ccbSVinicius Costa Gomes 
20449af23657SVladimir Oltean 	oper = rtnl_dereference(q->oper_sched);
20459af23657SVladimir Oltean 	admin = rtnl_dereference(q->admin_sched);
2046a3d43c0dSVinicius Costa Gomes 
20479af23657SVladimir Oltean 	if (oper)
20489af23657SVladimir Oltean 		call_rcu(&oper->rcu, taprio_free_sched_cb);
20499af23657SVladimir Oltean 
20509af23657SVladimir Oltean 	if (admin)
20519af23657SVladimir Oltean 		call_rcu(&admin->rcu, taprio_free_sched_cb);
20522f530df7SVladimir Oltean 
20532f530df7SVladimir Oltean 	taprio_cleanup_broken_mqprio(q);
20545a781ccbSVinicius Costa Gomes }
20555a781ccbSVinicius Costa Gomes 
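/* taprio can only be attached as the root qdisc of a multiqueue device,
 * and the per-queue children are pre-allocated so that attach cannot
 * fail. A typical software-mode configuration (illustrative values only)
 * might look like:
 *
 *	tc qdisc replace dev eth0 parent root handle 100 taprio \
 *		num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *		queues 1@0 1@1 2@2 \
 *		base-time 1000000000 \
 *		sched-entry S 01 300000 \
 *		sched-entry S 02 300000 \
 *		sched-entry S 04 400000 \
 *		clockid CLOCK_TAI
 */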
20565a781ccbSVinicius Costa Gomes static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
20575a781ccbSVinicius Costa Gomes 		       struct netlink_ext_ack *extack)
20585a781ccbSVinicius Costa Gomes {
20595a781ccbSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
20605a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
2061a721c3e5SVladimir Oltean 	int i, tc;
20625a781ccbSVinicius Costa Gomes 
20635a781ccbSVinicius Costa Gomes 	spin_lock_init(&q->current_entry_lock);
20645a781ccbSVinicius Costa Gomes 
20655a781ccbSVinicius Costa Gomes 	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
2066a3d43c0dSVinicius Costa Gomes 	q->advance_timer.function = advance_sched;
20675a781ccbSVinicius Costa Gomes 
20685a781ccbSVinicius Costa Gomes 	q->root = sch;
20695a781ccbSVinicius Costa Gomes 
20705a781ccbSVinicius Costa Gomes 	/* We only support static clockids. Use an invalid value as default
20715a781ccbSVinicius Costa Gomes 	 * and get the valid one in taprio_change().
20725a781ccbSVinicius Costa Gomes 	 */
20735a781ccbSVinicius Costa Gomes 	q->clockid = -1;
2074a9d62274SVinicius Costa Gomes 	q->flags = TAPRIO_FLAGS_INVALID;
20755a781ccbSVinicius Costa Gomes 
2076efb55222SVladimir Oltean 	list_add(&q->taprio_list, &taprio_list);
2077efb55222SVladimir Oltean 
2078026de64dSVladimir Oltean 	if (sch->parent != TC_H_ROOT) {
2079026de64dSVladimir Oltean 		NL_SET_ERR_MSG_MOD(extack, "Can only be attached as root qdisc");
20805a781ccbSVinicius Costa Gomes 		return -EOPNOTSUPP;
2081026de64dSVladimir Oltean 	}
20825a781ccbSVinicius Costa Gomes 
2083026de64dSVladimir Oltean 	if (!netif_is_multiqueue(dev)) {
2084026de64dSVladimir Oltean 		NL_SET_ERR_MSG_MOD(extack, "Multi-queue device is required");
20855a781ccbSVinicius Costa Gomes 		return -EOPNOTSUPP;
2086026de64dSVladimir Oltean 	}
20875a781ccbSVinicius Costa Gomes 
20885a781ccbSVinicius Costa Gomes 	/* pre-allocate qdiscs so that attachment can't fail */
20895a781ccbSVinicius Costa Gomes 	q->qdiscs = kcalloc(dev->num_tx_queues,
20905a781ccbSVinicius Costa Gomes 			    sizeof(q->qdiscs[0]),
20915a781ccbSVinicius Costa Gomes 			    GFP_KERNEL);
20925a781ccbSVinicius Costa Gomes 
20935a781ccbSVinicius Costa Gomes 	if (!q->qdiscs)
20945a781ccbSVinicius Costa Gomes 		return -ENOMEM;
20955a781ccbSVinicius Costa Gomes 
20965a781ccbSVinicius Costa Gomes 	if (!opt)
20975a781ccbSVinicius Costa Gomes 		return -EINVAL;
20985a781ccbSVinicius Costa Gomes 
2099a3d43c0dSVinicius Costa Gomes 	for (i = 0; i < dev->num_tx_queues; i++) {
2100a3d43c0dSVinicius Costa Gomes 		struct netdev_queue *dev_queue;
2101a3d43c0dSVinicius Costa Gomes 		struct Qdisc *qdisc;
2102a3d43c0dSVinicius Costa Gomes 
2103a3d43c0dSVinicius Costa Gomes 		dev_queue = netdev_get_tx_queue(dev, i);
2104a3d43c0dSVinicius Costa Gomes 		qdisc = qdisc_create_dflt(dev_queue,
2105a3d43c0dSVinicius Costa Gomes 					  &pfifo_qdisc_ops,
2106a3d43c0dSVinicius Costa Gomes 					  TC_H_MAKE(TC_H_MAJ(sch->handle),
2107a3d43c0dSVinicius Costa Gomes 						    TC_H_MIN(i + 1)),
2108a3d43c0dSVinicius Costa Gomes 					  extack);
2109a3d43c0dSVinicius Costa Gomes 		if (!qdisc)
2110a3d43c0dSVinicius Costa Gomes 			return -ENOMEM;
2111a3d43c0dSVinicius Costa Gomes 
2112a3d43c0dSVinicius Costa Gomes 		if (i < dev->real_num_tx_queues)
2113a3d43c0dSVinicius Costa Gomes 			qdisc_hash_add(qdisc, false);
2114a3d43c0dSVinicius Costa Gomes 
2115a3d43c0dSVinicius Costa Gomes 		q->qdiscs[i] = qdisc;
2116a3d43c0dSVinicius Costa Gomes 	}
2117a3d43c0dSVinicius Costa Gomes 
2118a721c3e5SVladimir Oltean 	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
2119a721c3e5SVladimir Oltean 		q->fp[tc] = TC_FP_EXPRESS;
2120a721c3e5SVladimir Oltean 
21212f530df7SVladimir Oltean 	taprio_detect_broken_mqprio(q);
21222f530df7SVladimir Oltean 
21235a781ccbSVinicius Costa Gomes 	return taprio_change(sch, opt, extack);
21245a781ccbSVinicius Costa Gomes }
21255a781ccbSVinicius Costa Gomes 
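/* In full offload mode the pre-allocated children are grafted directly
 * onto the TX queues, so packets bypass taprio's software datapath from
 * here on; in the software modes taprio itself is grafted onto every
 * queue so that all transmissions funnel through its enqueue/dequeue
 * handlers.
 */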
212613511704SYannick Vignon static void taprio_attach(struct Qdisc *sch)
212713511704SYannick Vignon {
212813511704SYannick Vignon 	struct taprio_sched *q = qdisc_priv(sch);
212913511704SYannick Vignon 	struct net_device *dev = qdisc_dev(sch);
213013511704SYannick Vignon 	unsigned int ntx;
213113511704SYannick Vignon 
213213511704SYannick Vignon 	/* Attach underlying qdiscs */
213313511704SYannick Vignon 	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
213413511704SYannick Vignon 		struct Qdisc *qdisc = q->qdiscs[ntx];
213513511704SYannick Vignon 		struct Qdisc *old;
213613511704SYannick Vignon 
213713511704SYannick Vignon 		if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
213813511704SYannick Vignon 			qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
213913511704SYannick Vignon 			old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
214013511704SYannick Vignon 		} else {
214113511704SYannick Vignon 			old = dev_graft_qdisc(qdisc->dev_queue, sch);
214213511704SYannick Vignon 			qdisc_refcount_inc(sch);
214313511704SYannick Vignon 		}
214413511704SYannick Vignon 		if (old)
214513511704SYannick Vignon 			qdisc_put(old);
214613511704SYannick Vignon 	}
214713511704SYannick Vignon 
214813511704SYannick Vignon 	/* access to the child qdiscs is not needed in offload mode */
214913511704SYannick Vignon 	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
215013511704SYannick Vignon 		kfree(q->qdiscs);
215113511704SYannick Vignon 		q->qdiscs = NULL;
215213511704SYannick Vignon 	}
215313511704SYannick Vignon }
215413511704SYannick Vignon 
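/* Class identifiers map 1:1 onto TX queues: class 'cl' is TX queue
 * 'cl - 1', which also makes classid 0 invalid.
 */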
21555a781ccbSVinicius Costa Gomes static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
21565a781ccbSVinicius Costa Gomes 					     unsigned long cl)
21575a781ccbSVinicius Costa Gomes {
21585a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
21595a781ccbSVinicius Costa Gomes 	unsigned long ntx = cl - 1;
21605a781ccbSVinicius Costa Gomes 
21615a781ccbSVinicius Costa Gomes 	if (ntx >= dev->num_tx_queues)
21625a781ccbSVinicius Costa Gomes 		return NULL;
21635a781ccbSVinicius Costa Gomes 
21645a781ccbSVinicius Costa Gomes 	return netdev_get_tx_queue(dev, ntx);
21655a781ccbSVinicius Costa Gomes }
21665a781ccbSVinicius Costa Gomes 
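/* Replace the child qdisc of one TX queue. The device is deactivated
 * around the swap; in full offload mode the new child is grafted straight
 * onto the queue, otherwise only the internal q->qdiscs[] slot changes
 * (the queue itself keeps pointing at taprio).
 */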
21675a781ccbSVinicius Costa Gomes static int taprio_graft(struct Qdisc *sch, unsigned long cl,
21685a781ccbSVinicius Costa Gomes 			struct Qdisc *new, struct Qdisc **old,
21695a781ccbSVinicius Costa Gomes 			struct netlink_ext_ack *extack)
21705a781ccbSVinicius Costa Gomes {
21715a781ccbSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
21725a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
21735a781ccbSVinicius Costa Gomes 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
21745a781ccbSVinicius Costa Gomes 
21755a781ccbSVinicius Costa Gomes 	if (!dev_queue)
21765a781ccbSVinicius Costa Gomes 		return -EINVAL;
21775a781ccbSVinicius Costa Gomes 
21785a781ccbSVinicius Costa Gomes 	if (dev->flags & IFF_UP)
21795a781ccbSVinicius Costa Gomes 		dev_deactivate(dev);
21805a781ccbSVinicius Costa Gomes 
218113511704SYannick Vignon 	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
218213511704SYannick Vignon 		*old = dev_graft_qdisc(dev_queue, new);
218313511704SYannick Vignon 	} else {
21845a781ccbSVinicius Costa Gomes 		*old = q->qdiscs[cl - 1];
21855a781ccbSVinicius Costa Gomes 		q->qdiscs[cl - 1] = new;
218613511704SYannick Vignon 	}
21875a781ccbSVinicius Costa Gomes 
21885a781ccbSVinicius Costa Gomes 	if (new)
21895a781ccbSVinicius Costa Gomes 		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
21905a781ccbSVinicius Costa Gomes 
21915a781ccbSVinicius Costa Gomes 	if (dev->flags & IFF_UP)
21925a781ccbSVinicius Costa Gomes 		dev_activate(dev);
21935a781ccbSVinicius Costa Gomes 
21945a781ccbSVinicius Costa Gomes 	return 0;
21955a781ccbSVinicius Costa Gomes }
21965a781ccbSVinicius Costa Gomes 
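/* Netlink dump helpers. The resulting attribute nesting is roughly
 * (sketch, see the UAPI header for the authoritative layout):
 *
 *	TCA_OPTIONS
 *	  TCA_TAPRIO_ATTR_PRIOMAP, _SCHED_CLOCKID, _FLAGS, _TXTIME_DELAY
 *	  TCA_TAPRIO_ATTR_TC_ENTRY (one per traffic class)
 *	  oper schedule: _SCHED_BASE_TIME, _SCHED_CYCLE_TIME, ...
 *	  TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST
 *	    TCA_TAPRIO_SCHED_ENTRY
 *	      _ENTRY_INDEX, _ENTRY_CMD, _ENTRY_GATE_MASK, _ENTRY_INTERVAL
 *	  TCA_TAPRIO_ATTR_ADMIN_SCHED (same schedule layout, if pending)
 */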
21975a781ccbSVinicius Costa Gomes static int dump_entry(struct sk_buff *msg,
21985a781ccbSVinicius Costa Gomes 		      const struct sched_entry *entry)
21995a781ccbSVinicius Costa Gomes {
22005a781ccbSVinicius Costa Gomes 	struct nlattr *item;
22015a781ccbSVinicius Costa Gomes 
2202ae0be8deSMichal Kubecek 	item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
22035a781ccbSVinicius Costa Gomes 	if (!item)
22045a781ccbSVinicius Costa Gomes 		return -ENOSPC;
22055a781ccbSVinicius Costa Gomes 
22065a781ccbSVinicius Costa Gomes 	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
22075a781ccbSVinicius Costa Gomes 		goto nla_put_failure;
22085a781ccbSVinicius Costa Gomes 
22095a781ccbSVinicius Costa Gomes 	if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
22105a781ccbSVinicius Costa Gomes 		goto nla_put_failure;
22115a781ccbSVinicius Costa Gomes 
22125a781ccbSVinicius Costa Gomes 	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
22135a781ccbSVinicius Costa Gomes 			entry->gate_mask))
22145a781ccbSVinicius Costa Gomes 		goto nla_put_failure;
22155a781ccbSVinicius Costa Gomes 
22165a781ccbSVinicius Costa Gomes 	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
22175a781ccbSVinicius Costa Gomes 			entry->interval))
22185a781ccbSVinicius Costa Gomes 		goto nla_put_failure;
22195a781ccbSVinicius Costa Gomes 
22205a781ccbSVinicius Costa Gomes 	return nla_nest_end(msg, item);
22215a781ccbSVinicius Costa Gomes 
22225a781ccbSVinicius Costa Gomes nla_put_failure:
22235a781ccbSVinicius Costa Gomes 	nla_nest_cancel(msg, item);
22245a781ccbSVinicius Costa Gomes 	return -1;
22255a781ccbSVinicius Costa Gomes }
22265a781ccbSVinicius Costa Gomes 
2227a3d43c0dSVinicius Costa Gomes static int dump_schedule(struct sk_buff *msg,
2228a3d43c0dSVinicius Costa Gomes 			 const struct sched_gate_list *root)
2229a3d43c0dSVinicius Costa Gomes {
2230a3d43c0dSVinicius Costa Gomes 	struct nlattr *entry_list;
2231a3d43c0dSVinicius Costa Gomes 	struct sched_entry *entry;
2232a3d43c0dSVinicius Costa Gomes 
2233a3d43c0dSVinicius Costa Gomes 	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
2234a3d43c0dSVinicius Costa Gomes 			root->base_time, TCA_TAPRIO_PAD))
2235a3d43c0dSVinicius Costa Gomes 		return -1;
2236a3d43c0dSVinicius Costa Gomes 
22376ca6a665SVinicius Costa Gomes 	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
22386ca6a665SVinicius Costa Gomes 			root->cycle_time, TCA_TAPRIO_PAD))
22396ca6a665SVinicius Costa Gomes 		return -1;
22406ca6a665SVinicius Costa Gomes 
2241c25031e9SVinicius Costa Gomes 	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
2242c25031e9SVinicius Costa Gomes 			root->cycle_time_extension, TCA_TAPRIO_PAD))
2243c25031e9SVinicius Costa Gomes 		return -1;
2244c25031e9SVinicius Costa Gomes 
2245a3d43c0dSVinicius Costa Gomes 	entry_list = nla_nest_start_noflag(msg,
2246a3d43c0dSVinicius Costa Gomes 					   TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
2247a3d43c0dSVinicius Costa Gomes 	if (!entry_list)
2248a3d43c0dSVinicius Costa Gomes 		goto error_nest;
2249a3d43c0dSVinicius Costa Gomes 
2250a3d43c0dSVinicius Costa Gomes 	list_for_each_entry(entry, &root->entries, list) {
2251a3d43c0dSVinicius Costa Gomes 		if (dump_entry(msg, entry) < 0)
2252a3d43c0dSVinicius Costa Gomes 			goto error_nest;
2253a3d43c0dSVinicius Costa Gomes 	}
2254a3d43c0dSVinicius Costa Gomes 
2255a3d43c0dSVinicius Costa Gomes 	nla_nest_end(msg, entry_list);
2256a3d43c0dSVinicius Costa Gomes 	return 0;
2257a3d43c0dSVinicius Costa Gomes 
2258a3d43c0dSVinicius Costa Gomes error_nest:
2259a3d43c0dSVinicius Costa Gomes 	nla_nest_cancel(msg, entry_list);
2260a3d43c0dSVinicius Costa Gomes 	return -1;
2261a3d43c0dSVinicius Costa Gomes }
2262a3d43c0dSVinicius Costa Gomes 
2263fed87cc6SVladimir Oltean static int taprio_dump_tc_entries(struct sk_buff *skb,
2264a721c3e5SVladimir Oltean 				  struct taprio_sched *q,
2265fed87cc6SVladimir Oltean 				  struct sched_gate_list *sched)
2266a54fc09eSVladimir Oltean {
2267a54fc09eSVladimir Oltean 	struct nlattr *n;
2268a54fc09eSVladimir Oltean 	int tc;
2269a54fc09eSVladimir Oltean 
2270a54fc09eSVladimir Oltean 	for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
2271a54fc09eSVladimir Oltean 		n = nla_nest_start(skb, TCA_TAPRIO_ATTR_TC_ENTRY);
2272a54fc09eSVladimir Oltean 		if (!n)
2273a54fc09eSVladimir Oltean 			return -EMSGSIZE;
2274a54fc09eSVladimir Oltean 
2275a54fc09eSVladimir Oltean 		if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_INDEX, tc))
2276a54fc09eSVladimir Oltean 			goto nla_put_failure;
2277a54fc09eSVladimir Oltean 
2278a54fc09eSVladimir Oltean 		if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
2279fed87cc6SVladimir Oltean 				sched->max_sdu[tc]))
2280a54fc09eSVladimir Oltean 			goto nla_put_failure;
2281a54fc09eSVladimir Oltean 
2282a721c3e5SVladimir Oltean 		if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_FP, q->fp[tc]))
2283a721c3e5SVladimir Oltean 			goto nla_put_failure;
2284a721c3e5SVladimir Oltean 
2285a54fc09eSVladimir Oltean 		nla_nest_end(skb, n);
2286a54fc09eSVladimir Oltean 	}
2287a54fc09eSVladimir Oltean 
2288a54fc09eSVladimir Oltean 	return 0;
2289a54fc09eSVladimir Oltean 
2290a54fc09eSVladimir Oltean nla_put_failure:
2291a54fc09eSVladimir Oltean 	nla_nest_cancel(skb, n);
2292a54fc09eSVladimir Oltean 	return -EMSGSIZE;
2293a54fc09eSVladimir Oltean }
2294a54fc09eSVladimir Oltean 
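/* Offload statistics: the stats structure is pre-filled with all-ones
 * (TAPRIO_STAT_NOT_SET) before calling into the driver, so any counter
 * the driver leaves untouched is simply not dumped.
 */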
22956c1adb65SVladimir Oltean static int taprio_put_stat(struct sk_buff *skb, u64 val, u16 attrtype)
22966c1adb65SVladimir Oltean {
22976c1adb65SVladimir Oltean 	if (val == TAPRIO_STAT_NOT_SET)
22986c1adb65SVladimir Oltean 		return 0;
22996c1adb65SVladimir Oltean 	if (nla_put_u64_64bit(skb, attrtype, val, TCA_TAPRIO_OFFLOAD_STATS_PAD))
23006c1adb65SVladimir Oltean 		return -EMSGSIZE;
23016c1adb65SVladimir Oltean 	return 0;
23026c1adb65SVladimir Oltean }
23036c1adb65SVladimir Oltean 
23046c1adb65SVladimir Oltean static int taprio_dump_xstats(struct Qdisc *sch, struct gnet_dump *d,
23056c1adb65SVladimir Oltean 			      struct tc_taprio_qopt_offload *offload,
23066c1adb65SVladimir Oltean 			      struct tc_taprio_qopt_stats *stats)
23076c1adb65SVladimir Oltean {
23086c1adb65SVladimir Oltean 	struct net_device *dev = qdisc_dev(sch);
23096c1adb65SVladimir Oltean 	const struct net_device_ops *ops;
23106c1adb65SVladimir Oltean 	struct sk_buff *skb = d->skb;
23116c1adb65SVladimir Oltean 	struct nlattr *xstats;
23126c1adb65SVladimir Oltean 	int err;
23136c1adb65SVladimir Oltean 
23146c1adb65SVladimir Oltean 	ops = qdisc_dev(sch)->netdev_ops;
23156c1adb65SVladimir Oltean 
23166c1adb65SVladimir Oltean 	/* FIXME I could use qdisc_offload_dump_helper(), but that messes
23176c1adb65SVladimir Oltean 	 * with sch->flags depending on whether the device reports taprio
23186c1adb65SVladimir Oltean 	 * stats, and I'm not sure whether that's a good idea, considering
23196c1adb65SVladimir Oltean 	 * that stats are optional to the offload itself
23206c1adb65SVladimir Oltean 	 */
23216c1adb65SVladimir Oltean 	if (!ops->ndo_setup_tc)
23226c1adb65SVladimir Oltean 		return 0;
23236c1adb65SVladimir Oltean 
23246c1adb65SVladimir Oltean 	memset(stats, 0xff, sizeof(*stats));
23256c1adb65SVladimir Oltean 
23266c1adb65SVladimir Oltean 	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
23276c1adb65SVladimir Oltean 	if (err == -EOPNOTSUPP)
23286c1adb65SVladimir Oltean 		return 0;
23296c1adb65SVladimir Oltean 	if (err)
23306c1adb65SVladimir Oltean 		return err;
23316c1adb65SVladimir Oltean 
23326c1adb65SVladimir Oltean 	xstats = nla_nest_start(skb, TCA_STATS_APP);
23336c1adb65SVladimir Oltean 	if (!xstats)
23346c1adb65SVladimir Oltean 		goto err;
23356c1adb65SVladimir Oltean 
23366c1adb65SVladimir Oltean 	if (taprio_put_stat(skb, stats->window_drops,
23376c1adb65SVladimir Oltean 			    TCA_TAPRIO_OFFLOAD_STATS_WINDOW_DROPS) ||
23386c1adb65SVladimir Oltean 	    taprio_put_stat(skb, stats->tx_overruns,
23396c1adb65SVladimir Oltean 			    TCA_TAPRIO_OFFLOAD_STATS_TX_OVERRUNS))
23406c1adb65SVladimir Oltean 		goto err_cancel;
23416c1adb65SVladimir Oltean 
23426c1adb65SVladimir Oltean 	nla_nest_end(skb, xstats);
23436c1adb65SVladimir Oltean 
23446c1adb65SVladimir Oltean 	return 0;
23456c1adb65SVladimir Oltean 
23466c1adb65SVladimir Oltean err_cancel:
23476c1adb65SVladimir Oltean 	nla_nest_cancel(skb, xstats);
23486c1adb65SVladimir Oltean err:
23496c1adb65SVladimir Oltean 	return -EMSGSIZE;
23506c1adb65SVladimir Oltean }
23516c1adb65SVladimir Oltean 
23526c1adb65SVladimir Oltean static int taprio_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
23536c1adb65SVladimir Oltean {
23546c1adb65SVladimir Oltean 	struct tc_taprio_qopt_offload offload = {
23556c1adb65SVladimir Oltean 		.cmd = TAPRIO_CMD_STATS,
23566c1adb65SVladimir Oltean 	};
23576c1adb65SVladimir Oltean 
23586c1adb65SVladimir Oltean 	return taprio_dump_xstats(sch, d, &offload, &offload.stats);
23596c1adb65SVladimir Oltean }
23606c1adb65SVladimir Oltean 
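/* Dump the full configuration: the mqprio-style priomap, clockid and
 * flags, the per-tc entries and the operational schedule, followed by
 * the admin schedule in its own nest if one is pending.
 */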
23615a781ccbSVinicius Costa Gomes static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
23625a781ccbSVinicius Costa Gomes {
23635a781ccbSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
23645a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
2365a3d43c0dSVinicius Costa Gomes 	struct sched_gate_list *oper, *admin;
23665a781ccbSVinicius Costa Gomes 	struct tc_mqprio_qopt opt = { 0 };
2367a3d43c0dSVinicius Costa Gomes 	struct nlattr *nest, *sched_nest;
23685a781ccbSVinicius Costa Gomes 
236918cdd2f0SVladimir Oltean 	oper = rtnl_dereference(q->oper_sched);
237018cdd2f0SVladimir Oltean 	admin = rtnl_dereference(q->admin_sched);
2371a3d43c0dSVinicius Costa Gomes 
23729dd6ad67SVladimir Oltean 	mqprio_qopt_reconstruct(dev, &opt);
23735a781ccbSVinicius Costa Gomes 
2374ae0be8deSMichal Kubecek 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
23755a781ccbSVinicius Costa Gomes 	if (!nest)
2376a3d43c0dSVinicius Costa Gomes 		goto start_error;
23775a781ccbSVinicius Costa Gomes 
23785a781ccbSVinicius Costa Gomes 	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
23795a781ccbSVinicius Costa Gomes 		goto options_error;
23805a781ccbSVinicius Costa Gomes 
23819c66d156SVinicius Costa Gomes 	if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
23829c66d156SVinicius Costa Gomes 	    nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
23835a781ccbSVinicius Costa Gomes 		goto options_error;
23845a781ccbSVinicius Costa Gomes 
23854cfd5779SVedang Patel 	if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
23864cfd5779SVedang Patel 		goto options_error;
23874cfd5779SVedang Patel 
23884cfd5779SVedang Patel 	if (q->txtime_delay &&
2389a5b64700SVedang Patel 	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
23904cfd5779SVedang Patel 		goto options_error;
23914cfd5779SVedang Patel 
2392a721c3e5SVladimir Oltean 	if (oper && taprio_dump_tc_entries(skb, q, oper))
2393a54fc09eSVladimir Oltean 		goto options_error;
2394a54fc09eSVladimir Oltean 
2395a3d43c0dSVinicius Costa Gomes 	if (oper && dump_schedule(skb, oper))
23965a781ccbSVinicius Costa Gomes 		goto options_error;
23975a781ccbSVinicius Costa Gomes 
2398a3d43c0dSVinicius Costa Gomes 	if (!admin)
2399a3d43c0dSVinicius Costa Gomes 		goto done;
24005a781ccbSVinicius Costa Gomes 
2401a3d43c0dSVinicius Costa Gomes 	sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
2402e4acf427SColin Ian King 	if (!sched_nest)
2403e4acf427SColin Ian King 		goto options_error;
2404a3d43c0dSVinicius Costa Gomes 
2405a3d43c0dSVinicius Costa Gomes 	if (dump_schedule(skb, admin))
2406a3d43c0dSVinicius Costa Gomes 		goto admin_error;
2407a3d43c0dSVinicius Costa Gomes 
2408a3d43c0dSVinicius Costa Gomes 	nla_nest_end(skb, sched_nest);
2409a3d43c0dSVinicius Costa Gomes 
2410a3d43c0dSVinicius Costa Gomes done:
24115a781ccbSVinicius Costa Gomes 	return nla_nest_end(skb, nest);
24125a781ccbSVinicius Costa Gomes 
2413a3d43c0dSVinicius Costa Gomes admin_error:
2414a3d43c0dSVinicius Costa Gomes 	nla_nest_cancel(skb, sched_nest);
2415a3d43c0dSVinicius Costa Gomes 
24165a781ccbSVinicius Costa Gomes options_error:
24175a781ccbSVinicius Costa Gomes 	nla_nest_cancel(skb, nest);
2418a3d43c0dSVinicius Costa Gomes 
2419a3d43c0dSVinicius Costa Gomes start_error:
2420a3d43c0dSVinicius Costa Gomes 	return -ENOSPC;
24215a781ccbSVinicius Costa Gomes }
24225a781ccbSVinicius Costa Gomes 
24235a781ccbSVinicius Costa Gomes static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
24245a781ccbSVinicius Costa Gomes {
2425af7b29b1SVladimir Oltean 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
24265a781ccbSVinicius Costa Gomes 
2427af7b29b1SVladimir Oltean 	if (!dev_queue)
24285a781ccbSVinicius Costa Gomes 		return NULL;
24295a781ccbSVinicius Costa Gomes 
2430d636fc5dSEric Dumazet 	return rtnl_dereference(dev_queue->qdisc_sleeping);
24315a781ccbSVinicius Costa Gomes }
24325a781ccbSVinicius Costa Gomes 
24335a781ccbSVinicius Costa Gomes static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
24345a781ccbSVinicius Costa Gomes {
24355a781ccbSVinicius Costa Gomes 	unsigned int ntx = TC_H_MIN(classid);
24365a781ccbSVinicius Costa Gomes 
24375a781ccbSVinicius Costa Gomes 	if (!taprio_queue_get(sch, ntx))
24385a781ccbSVinicius Costa Gomes 		return 0;
24395a781ccbSVinicius Costa Gomes 	return ntx;
24405a781ccbSVinicius Costa Gomes }
24415a781ccbSVinicius Costa Gomes 
24425a781ccbSVinicius Costa Gomes static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
24435a781ccbSVinicius Costa Gomes 			     struct sk_buff *skb, struct tcmsg *tcm)
24445a781ccbSVinicius Costa Gomes {
24455a781ccbSVinicius Costa Gomes 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
24465a781ccbSVinicius Costa Gomes 
24475a781ccbSVinicius Costa Gomes 	tcm->tcm_parent = TC_H_ROOT;
24485a781ccbSVinicius Costa Gomes 	tcm->tcm_handle |= TC_H_MIN(cl);
2449d636fc5dSEric Dumazet 	tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
24505a781ccbSVinicius Costa Gomes 
24515a781ccbSVinicius Costa Gomes 	return 0;
24525a781ccbSVinicius Costa Gomes }
24535a781ccbSVinicius Costa Gomes 
24545a781ccbSVinicius Costa Gomes static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
24555a781ccbSVinicius Costa Gomes 				   struct gnet_dump *d)
24565a781ccbSVinicius Costa Gomes 	__releases(d->lock)
24575a781ccbSVinicius Costa Gomes 	__acquires(d->lock)
24585a781ccbSVinicius Costa Gomes {
24595a781ccbSVinicius Costa Gomes 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
24606c1adb65SVladimir Oltean 	struct tc_taprio_qopt_offload offload = {
2461*2b84960fSVladimir Oltean 		.cmd = TAPRIO_CMD_QUEUE_STATS,
2462*2b84960fSVladimir Oltean 		.queue_stats = {
2463*2b84960fSVladimir Oltean 			.queue = cl - 1,
24646c1adb65SVladimir Oltean 		},
24656c1adb65SVladimir Oltean 	};
2466449f6bc1SJakub Kicinski 	struct Qdisc *child;
24675a781ccbSVinicius Costa Gomes 
2468449f6bc1SJakub Kicinski 	child = rtnl_dereference(dev_queue->qdisc_sleeping);
2469dced11efSVladimir Oltean 	if (gnet_stats_copy_basic(d, NULL, &child->bstats, true) < 0 ||
2470dced11efSVladimir Oltean 	    qdisc_qstats_copy(d, child) < 0)
24715a781ccbSVinicius Costa Gomes 		return -1;
24726c1adb65SVladimir Oltean 
2473*2b84960fSVladimir Oltean 	return taprio_dump_xstats(sch, d, &offload, &offload.queue_stats.stats);
24745a781ccbSVinicius Costa Gomes }
24755a781ccbSVinicius Costa Gomes 
24765a781ccbSVinicius Costa Gomes static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
24775a781ccbSVinicius Costa Gomes {
24785a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
24795a781ccbSVinicius Costa Gomes 	unsigned long ntx;
24805a781ccbSVinicius Costa Gomes 
24815a781ccbSVinicius Costa Gomes 	if (arg->stop)
24825a781ccbSVinicius Costa Gomes 		return;
24835a781ccbSVinicius Costa Gomes 
24845a781ccbSVinicius Costa Gomes 	arg->count = arg->skip;
24855a781ccbSVinicius Costa Gomes 	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
2486e046fa89SZhengchao Shao 		if (!tc_qdisc_stats_dump(sch, ntx + 1, arg))
24875a781ccbSVinicius Costa Gomes 			break;
24885a781ccbSVinicius Costa Gomes 	}
24895a781ccbSVinicius Costa Gomes }
24905a781ccbSVinicius Costa Gomes 
24915a781ccbSVinicius Costa Gomes static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
24925a781ccbSVinicius Costa Gomes 						struct tcmsg *tcm)
24935a781ccbSVinicius Costa Gomes {
24945a781ccbSVinicius Costa Gomes 	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
24955a781ccbSVinicius Costa Gomes }
24965a781ccbSVinicius Costa Gomes 
24975a781ccbSVinicius Costa Gomes static const struct Qdisc_class_ops taprio_class_ops = {
24985a781ccbSVinicius Costa Gomes 	.graft		= taprio_graft,
24995a781ccbSVinicius Costa Gomes 	.leaf		= taprio_leaf,
25005a781ccbSVinicius Costa Gomes 	.find		= taprio_find,
25015a781ccbSVinicius Costa Gomes 	.walk		= taprio_walk,
25025a781ccbSVinicius Costa Gomes 	.dump		= taprio_dump_class,
25035a781ccbSVinicius Costa Gomes 	.dump_stats	= taprio_dump_class_stats,
25045a781ccbSVinicius Costa Gomes 	.select_queue	= taprio_select_queue,
25055a781ccbSVinicius Costa Gomes };
25065a781ccbSVinicius Costa Gomes 
25075a781ccbSVinicius Costa Gomes static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
25085a781ccbSVinicius Costa Gomes 	.cl_ops		= &taprio_class_ops,
25095a781ccbSVinicius Costa Gomes 	.id		= "taprio",
25105a781ccbSVinicius Costa Gomes 	.priv_size	= sizeof(struct taprio_sched),
25115a781ccbSVinicius Costa Gomes 	.init		= taprio_init,
2512a3d43c0dSVinicius Costa Gomes 	.change		= taprio_change,
25135a781ccbSVinicius Costa Gomes 	.destroy	= taprio_destroy,
251444d4775cSDavide Caratti 	.reset		= taprio_reset,
251513511704SYannick Vignon 	.attach		= taprio_attach,
25165a781ccbSVinicius Costa Gomes 	.peek		= taprio_peek,
25175a781ccbSVinicius Costa Gomes 	.dequeue	= taprio_dequeue,
25185a781ccbSVinicius Costa Gomes 	.enqueue	= taprio_enqueue,
25195a781ccbSVinicius Costa Gomes 	.dump		= taprio_dump,
25206c1adb65SVladimir Oltean 	.dump_stats	= taprio_dump_stats,
25215a781ccbSVinicius Costa Gomes 	.owner		= THIS_MODULE,
25225a781ccbSVinicius Costa Gomes };
25235a781ccbSVinicius Costa Gomes 
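/* The netdevice notifier keeps the instances on taprio_list in sync with
 * device events; taprio_dev_notifier() (defined earlier in this file)
 * uses it e.g. to react to link speed changes that affect transmission
 * time estimates.
 */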
25247b9eba7bSLeandro Dorileo static struct notifier_block taprio_device_notifier = {
25257b9eba7bSLeandro Dorileo 	.notifier_call = taprio_dev_notifier,
25267b9eba7bSLeandro Dorileo };
25277b9eba7bSLeandro Dorileo 
25285a781ccbSVinicius Costa Gomes static int __init taprio_module_init(void)
25295a781ccbSVinicius Costa Gomes {
25307b9eba7bSLeandro Dorileo 	int err = register_netdevice_notifier(&taprio_device_notifier);
25317b9eba7bSLeandro Dorileo 
25327b9eba7bSLeandro Dorileo 	if (err)
25337b9eba7bSLeandro Dorileo 		return err;
25347b9eba7bSLeandro Dorileo 
25355a781ccbSVinicius Costa Gomes 	return register_qdisc(&taprio_qdisc_ops);
25365a781ccbSVinicius Costa Gomes }
25375a781ccbSVinicius Costa Gomes 
25385a781ccbSVinicius Costa Gomes static void __exit taprio_module_exit(void)
25395a781ccbSVinicius Costa Gomes {
25405a781ccbSVinicius Costa Gomes 	unregister_qdisc(&taprio_qdisc_ops);
25417b9eba7bSLeandro Dorileo 	unregister_netdevice_notifier(&taprio_device_notifier);
25425a781ccbSVinicius Costa Gomes }
25435a781ccbSVinicius Costa Gomes 
25445a781ccbSVinicius Costa Gomes module_init(taprio_module_init);
25455a781ccbSVinicius Costa Gomes module_exit(taprio_module_exit);
25465a781ccbSVinicius Costa Gomes MODULE_LICENSE("GPL");
2547