xref: /openbmc/linux/net/sched/sch_taprio.c (revision 037be0374078e205ca802ca8716dabb6064f940e)
// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	 Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

static LIST_HEAD(taprio_list);
static DEFINE_SPINLOCK(taprio_list_lock);

#define TAPRIO_ALL_GATES_OPEN -1

struct sched_entry {
	struct list_head list;

	/* The instant that this entry "closes" and the next one
	 * should open; the qdisc will make some effort so that no
	 * packet leaves after this time.
	 */
	ktime_t close_time;
	atomic_t budget;
	int index;
	u32 gate_mask;
	u32 interval;
	u8 command;
};

struct sched_gate_list {
	struct rcu_head rcu;
	struct list_head entries;
	size_t num_entries;
	ktime_t cycle_close_time;
	s64 cycle_time;
	s64 cycle_time_extension;
	s64 base_time;
};

struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	int clockid;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds it's sub-nanoseconds per byte
				    */

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct sched_gate_list __rcu *oper_sched;
	struct sched_gate_list __rcu *admin_sched;
	ktime_t (*get_time)(void);
	struct hrtimer advance_timer;
	struct list_head taprio_list;
};

static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
	if (!sched)
		return KTIME_MAX;

	return ns_to_ktime(sched->base_time);
}

static void taprio_free_sched_cb(struct rcu_head *head)
{
	struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
	struct sched_entry *entry, *n;

	if (!sched)
		return;

	list_for_each_entry_safe(entry, n, &sched->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	kfree(sched);
}

static void switch_schedules(struct taprio_sched *q,
			     struct sched_gate_list **admin,
			     struct sched_gate_list **oper)
{
	rcu_assign_pointer(q->oper_sched, *admin);
	rcu_assign_pointer(q->admin_sched, NULL);

	if (*oper)
		call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

	*oper = *admin;
	*admin = NULL;
}

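/* Enqueue a packet on the child qdisc backing the skb's TX queue (as
 * given by skb_get_queue_mapping()); packets mapped to a queue without
 * a child qdisc are dropped.
 */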
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct Qdisc *child;
	int queue;

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return qdisc_enqueue(skb, child, to_free);
}

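/* Return (without dequeueing) the first queued packet whose traffic
 * class has its gate open in the current schedule entry, or NULL if no
 * gate is open or no eligible packet is queued.
 */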
static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_entry *entry;
	struct sk_buff *skb;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	rcu_read_unlock();

	if (!gate_mask)
		return NULL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		int prio;
		u8 tc;

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;

		return skb;
	}

	return NULL;
}

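/* Convert a packet length in bytes to its transmission duration in
 * nanoseconds, based on the link speed cached in picos_per_byte.
 */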
static inline int length_to_duration(struct taprio_sched *q, int len)
{
	return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
}

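/* Reset the entry's transmission budget to the number of bytes that fit
 * into its interval at the current link speed.
 */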
static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
	atomic_set(&entry->budget,
		   div64_u64((u64)entry->interval * 1000,
			     atomic64_read(&q->picos_per_byte)));
}

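/* Dequeue a packet from the first TX queue whose traffic class has an
 * open gate and whose head packet both fits before the entry's close
 * time (guard band) and within the entry's remaining byte budget.
 */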
static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb = NULL;
	struct sched_entry *entry;
	u32 gate_mask;
	int i;

	if (atomic64_read(&q->picos_per_byte) == -1) {
		WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte.");
		return NULL;
	}

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	/* If there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open. This is in
	 * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 * "AdminGateStates".
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;

	if (!gate_mask)
		goto done;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		ktime_t guard;
		int prio;
		int len;
		u8 tc;

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;

		len = qdisc_pkt_len(skb);
		guard = ktime_add_ns(q->get_time(),
				     length_to_duration(q, len));

		/* In the case that there's no gate entry, there's no
		 * guard band ...
		 */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    ktime_after(guard, entry->close_time))
			continue;

		/* ... and no budget. */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    atomic_sub_return(len, &entry->budget) < 0)
			continue;

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			goto done;

		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		goto done;
	}

done:
	rcu_read_unlock();

	return skb;
}

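/* A cycle restarts when the last entry of the schedule finishes, or
 * when the current entry closes exactly at the end of the cycle.
 */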
static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{
	if (list_is_last(&entry->list, &oper->entries))
		return true;

	if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
		return true;

	return false;
}

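/* Decide whether the admin (pending) schedule should replace the
 * operational one when the current entry closes.
 */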
static bool should_change_schedules(const struct sched_gate_list *admin,
				    const struct sched_gate_list *oper,
				    ktime_t close_time)
{
	ktime_t next_base_time, extension_time;

	if (!admin)
		return false;

	next_base_time = sched_base_time(admin);

	/* This is the simple case: the close_time would fall after
	 * the next schedule base_time.
	 */
	if (ktime_compare(next_base_time, close_time) <= 0)
		return true;

	/* This is the cycle_time_extension case: if the close_time
	 * plus the amount that can be extended would fall after the
	 * next schedule base_time, we can extend the current schedule
	 * for that amount.
	 */
	extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);

	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
	 * how precisely the extension should be made. So after
	 * conformance testing, this logic may change.
	 */
	if (ktime_compare(next_base_time, extension_time) <= 0)
		return true;

	return false;
}

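/* hrtimer callback that advances the schedule to its next entry,
 * restarts the cycle or switches to the admin schedule when needed,
 * re-arms the timer and kicks the qdisc so freshly opened gates are
 * serviced.
 */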
static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct sched_gate_list *oper, *admin;
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;
	ktime_t close_time;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));
	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	if (!oper)
		switch_schedules(q, &admin, &oper);

	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. the previous schedule just ended. The first
	 * entry of each schedule is pre-calculated during schedule
	 * initialization.
	 */
	if (unlikely(!entry || entry->close_time == oper->base_time)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		close_time = next->close_time;
		goto first_run;
	}

	if (should_restart_cycle(oper, entry)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
						      oper->cycle_time);
	} else {
		next = list_next_entry(entry, list);
	}

	close_time = ktime_add_ns(entry->close_time, next->interval);
	close_time = min_t(ktime_t, close_time, oper->cycle_close_time);

	if (should_change_schedules(admin, oper, close_time)) {
		/* Set things up so the new schedule takes effect the
		 * next time this function runs.
		 */
		close_time = sched_base_time(admin);
		switch_schedules(q, &admin, &oper);
	}

	next->close_time = close_time;
	taprio_set_budget(q, next);

first_run:
	rcu_assign_pointer(q->current_entry, next);
	spin_unlock(&q->current_entry_lock);

	hrtimer_set_expires(&q->advance_timer, close_time);

	rcu_read_lock();
	__netif_schedule(sch);
	rcu_read_unlock();

	return HRTIMER_RESTART;
}

static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY_INDEX]	   = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_CMD]	   = { .type = NLA_U8 },
	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
};

static const struct nla_policy entry_list_policy[TCA_TAPRIO_SCHED_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY] = { .type = NLA_NESTED },
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
	[TCA_TAPRIO_ATTR_PRIOMAP]	       = {
		.len = sizeof(struct tc_mqprio_qopt)
	},
	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]           = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]            = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]         = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
};

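/* Copy the command, gate mask and interval from the parsed netlink
 * attributes into a schedule entry; a zero interval is rejected.
 */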
static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
			    struct netlink_ext_ack *extack)
{
	u32 interval = 0;

	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
		entry->command = nla_get_u8(
			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
		entry->gate_mask = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
		interval = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

	if (interval == 0) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	return 0;
}

static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
			     int index, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
					  entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_sched_entry(tb, entry, extack);
}

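/* Parse the nested list of schedule entries, allocating one sched_entry
 * per attribute. Returns the number of entries parsed or a negative
 * error.
 */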
static int parse_sched_list(struct nlattr *list,
			    struct sched_gate_list *sched,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list)
		return -EINVAL;

	nla_for_each_nested(n, list, rem) {
		struct sched_entry *entry;

		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			return -ENOMEM;
		}

		err = parse_sched_entry(n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			return err;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;
}

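/* Fill a new gate list from the netlink attributes: base time, cycle
 * time, cycle time extension and the entry list. If no cycle time was
 * given, it defaults to the sum of all entry intervals.
 */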
static int parse_taprio_schedule(struct nlattr **tb,
				 struct sched_gate_list *new,
				 struct netlink_ext_ack *extack)
{
	int err = 0;

	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
		NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
		return -ENOTSUPP;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
		err = parse_sched_list(
			tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack);
	if (err < 0)
		return err;

	if (!new->cycle_time) {
		struct sched_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &new->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);
		new->cycle_time = cycle;
	}

	return 0;
}

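/* Validate the mqprio-style part of the configuration: number of
 * traffic classes, priority to class mapping and the class to queue
 * ranges (which must not overlap).
 */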
static int taprio_parse_mqprio_opt(struct net_device *dev,
				   struct tc_mqprio_qopt *qopt,
				   struct netlink_ext_ack *extack)
{
	int i, j;

	if (!qopt && !dev->num_tc) {
		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
		return -EINVAL;
	}

	/* If num_tc is already set, it means that the user already
	 * configured the mqprio part
	 */
	if (dev->num_tc)
		return 0;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
		return -EINVAL;
	}

	/* taprio imposes that traffic classes map 1:n to tx queues */
	if (qopt->num_tc > dev->num_tx_queues) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
		return -EINVAL;
	}

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
			NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
			return -EINVAL;
		}
	}

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify that the queue count is within the tx range; being
		 * equal to real_num_tx_queues indicates that the last queue
		 * is in use.
		 */
		if (qopt->offset[i] >= dev->num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues) {
			NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
			return -EINVAL;
		}

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j]) {
				NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
				return -EINVAL;
			}
		}
	}

	return 0;
}

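/* Compute when the schedule should (re)start: the base time if it is
 * still in the future, otherwise the beginning of the next cycle after
 * now.
 */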
static int taprio_get_start_time(struct Qdisc *sch,
				 struct sched_gate_list *sched,
				 ktime_t *start)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t now, base, cycle;
	s64 n;

	base = sched_base_time(sched);
	now = q->get_time();

	if (ktime_after(base, now)) {
		*start = base;
		return 0;
	}

	cycle = sched->cycle_time;

	/* The qdisc is expected to have at least one sched_entry.  Moreover,
	 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
	 * something went really wrong. In that case, we should warn about this
	 * inconsistent state and return error.
	 */
	if (WARN_ON(!cycle))
		return -EFAULT;

	/* Schedule the start time for the beginning of the next
	 * cycle.
	 */
	n = div64_s64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
	return 0;
}

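/* Pre-compute the close time and budget of the first entry, and the end
 * of the first cycle, so advance_sched() can pick the schedule up from
 * there.
 */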
static void setup_first_close_time(struct taprio_sched *q,
				   struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *first;
	ktime_t cycle;

	first = list_first_entry(&sched->entries,
				 struct sched_entry, list);

	cycle = sched->cycle_time;

	/* FIXME: find a better place to do this */
	sched->cycle_close_time = ktime_add_ns(base, cycle);

	first->close_time = ktime_add_ns(base, first->interval);
	taprio_set_budget(q, first);
	rcu_assign_pointer(q->current_entry, NULL);
}

static void taprio_start_sched(struct Qdisc *sch,
			       ktime_t start, struct sched_gate_list *new)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t expires;

	expires = hrtimer_get_expires(&q->advance_timer);
	if (expires == 0)
		expires = KTIME_MAX;

	/* If the new schedule starts before the next expiration, we
	 * reprogram it to the earliest one, so we change the admin
	 * schedule to the operational one at the right time.
	 */
	start = min_t(ktime_t, start, expires);

	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}

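/* Cache the link speed as picoseconds per byte, e.g. 8000 ps/byte at
 * 1 Gbps. If the speed is unknown the value stays at -1 and dequeue()
 * will refuse to run.
 */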
static void taprio_set_picos_per_byte(struct net_device *dev,
				      struct taprio_sched *q)
{
	struct ethtool_link_ksettings ecmd;
	int picos_per_byte = -1;

	if (!__ethtool_get_link_ksettings(dev, &ecmd) &&
	    ecmd.base.speed != SPEED_UNKNOWN)
		picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
					   ecmd.base.speed * 1000 * 1000);

	atomic64_set(&q->picos_per_byte, picos_per_byte);
	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
		   dev->name, (long long)atomic64_read(&q->picos_per_byte),
		   ecmd.base.speed);
}

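/* Netdevice notifier: on NETDEV_UP and NETDEV_CHANGE events, refresh the
 * cached picos_per_byte of the device if it has a taprio instance
 * attached.
 */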
static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net_device *qdev;
	struct taprio_sched *q;
	bool found = false;

	ASSERT_RTNL();

	if (event != NETDEV_UP && event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	spin_lock(&taprio_list_lock);
	list_for_each_entry(q, &taprio_list, taprio_list) {
		qdev = qdisc_dev(q->root);
		if (qdev == dev) {
			found = true;
			break;
		}
	}
	spin_unlock(&taprio_list_lock);

	if (found)
		taprio_set_picos_per_byte(dev, q);

	return NOTIFY_DONE;
}

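/* Parse and apply a new configuration: validate the mqprio mapping and
 * the schedule, install the new schedule as the admin one and arm the
 * timer that will switch it to operational at its start time.
 */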
static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
	struct sched_gate_list *oper, *admin, *new_admin;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mqprio_qopt *mqprio = NULL;
	int i, err, clockid;
	unsigned long flags;
	ktime_t start;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
					  taprio_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
		mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);

	err = taprio_parse_mqprio_opt(dev, mqprio, extack);
	if (err < 0)
		return err;

	new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
	if (!new_admin) {
		NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&new_admin->entries);

	rcu_read_lock();
	oper = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);
	rcu_read_unlock();

	if (mqprio && (oper || admin)) {
		NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
		err = -ENOTSUPP;
		goto free_sched;
	}

	err = parse_taprio_schedule(tb, new_admin, extack);
	if (err < 0)
		goto free_sched;

	if (new_admin->num_entries == 0) {
		NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
		err = -EINVAL;
		goto free_sched;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);

		/* We only support static clockids and we don't allow
		 * for it to be modified after the first init.
		 */
		if (clockid < 0 ||
		    (q->clockid != -1 && q->clockid != clockid)) {
			NL_SET_ERR_MSG(extack, "Changing the 'clockid' of a running schedule is not supported");
			err = -ENOTSUPP;
			goto free_sched;
		}

		q->clockid = clockid;
	}

	if (q->clockid == -1 && !tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
		err = -EINVAL;
		goto free_sched;
	}

	taprio_set_picos_per_byte(dev, q);

	/* Protects against enqueue()/dequeue() */
	spin_lock_bh(qdisc_lock(sch));

	if (!hrtimer_active(&q->advance_timer)) {
		hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
		q->advance_timer.function = advance_sched;
	}

	if (mqprio) {
		netdev_set_num_tc(dev, mqprio->num_tc);
		for (i = 0; i < mqprio->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    mqprio->count[i],
					    mqprio->offset[i]);

		/* Always use supplied priority mappings */
		for (i = 0; i < TC_BITMASK + 1; i++)
			netdev_set_prio_tc_map(dev, i,
					       mqprio->prio_tc_map[i]);
	}

	switch (q->clockid) {
	case CLOCK_REALTIME:
		q->get_time = ktime_get_real;
		break;
	case CLOCK_MONOTONIC:
		q->get_time = ktime_get;
		break;
	case CLOCK_BOOTTIME:
		q->get_time = ktime_get_boottime;
		break;
	case CLOCK_TAI:
		q->get_time = ktime_get_clocktai;
		break;
	default:
		NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
		err = -EINVAL;
		goto unlock;
	}

	err = taprio_get_start_time(sch, new_admin, &start);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
		goto unlock;
	}

	setup_first_close_time(q, new_admin, start);

	/* Protects against advance_sched() */
	spin_lock_irqsave(&q->current_entry_lock, flags);

	taprio_start_sched(sch, start, new_admin);

	rcu_assign_pointer(q->admin_sched, new_admin);
	if (admin)
		call_rcu(&admin->rcu, taprio_free_sched_cb);
	new_admin = NULL;

	spin_unlock_irqrestore(&q->current_entry_lock, flags);

	err = 0;

unlock:
	spin_unlock_bh(qdisc_lock(sch));

free_sched:
	kfree(new_admin);

	return err;
}

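/* Qdisc teardown: cancel the advance timer, release the per-queue child
 * qdiscs and free both schedules after an RCU grace period.
 */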
static void taprio_destroy(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	unsigned int i;

	spin_lock(&taprio_list_lock);
	list_del(&q->taprio_list);
	spin_unlock(&taprio_list_lock);

	hrtimer_cancel(&q->advance_timer);

	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
			qdisc_put(q->qdiscs[i]);

		kfree(q->qdiscs);
	}
	q->qdiscs = NULL;

	netdev_set_num_tc(dev, 0);

	if (q->oper_sched)
		call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);

	if (q->admin_sched)
		call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb);
}

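/* Qdisc init: allocate one pfifo child qdisc per TX queue and defer the
 * rest of the setup to taprio_change(). taprio must be the root qdisc
 * of a multiqueue device.
 */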
static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i;

	spin_lock_init(&q->current_entry_lock);

	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
	q->advance_timer.function = advance_sched;

	q->root = sch;

	/* We only support static clockids. Use an invalid value as default
	 * and get the valid one on taprio_change().
	 */
	q->clockid = -1;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdisc, attachment can't fail */
	q->qdiscs = kcalloc(dev->num_tx_queues,
			    sizeof(q->qdiscs[0]),
			    GFP_KERNEL);

	if (!q->qdiscs)
		return -ENOMEM;

	if (!opt)
		return -EINVAL;

	spin_lock(&taprio_list_lock);
	list_add(&q->taprio_list, &taprio_list);
	spin_unlock(&taprio_list_lock);

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		struct Qdisc *qdisc;

		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;

		if (i < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);

		q->qdiscs[i] = qdisc;
	}

	return taprio_change(sch, opt, extack);
}

static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return netdev_get_tx_queue(dev, ntx);
}

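/* Replace the child qdisc of one TX queue, deactivating the device
 * around the swap when it is up.
 */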
static int taprio_graft(struct Qdisc *sch, unsigned long cl,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = q->qdiscs[cl - 1];
	q->qdiscs[cl - 1] = new;

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

9795a781ccbSVinicius Costa Gomes static int dump_entry(struct sk_buff *msg,
9805a781ccbSVinicius Costa Gomes 		      const struct sched_entry *entry)
9815a781ccbSVinicius Costa Gomes {
9825a781ccbSVinicius Costa Gomes 	struct nlattr *item;
9835a781ccbSVinicius Costa Gomes 
984ae0be8deSMichal Kubecek 	item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
9855a781ccbSVinicius Costa Gomes 	if (!item)
9865a781ccbSVinicius Costa Gomes 		return -ENOSPC;
9875a781ccbSVinicius Costa Gomes 
9885a781ccbSVinicius Costa Gomes 	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
9895a781ccbSVinicius Costa Gomes 		goto nla_put_failure;
9905a781ccbSVinicius Costa Gomes 
9915a781ccbSVinicius Costa Gomes 	if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
9925a781ccbSVinicius Costa Gomes 		goto nla_put_failure;
9935a781ccbSVinicius Costa Gomes 
9945a781ccbSVinicius Costa Gomes 	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
9955a781ccbSVinicius Costa Gomes 			entry->gate_mask))
9965a781ccbSVinicius Costa Gomes 		goto nla_put_failure;
9975a781ccbSVinicius Costa Gomes 
9985a781ccbSVinicius Costa Gomes 	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
9995a781ccbSVinicius Costa Gomes 			entry->interval))
10005a781ccbSVinicius Costa Gomes 		goto nla_put_failure;
10015a781ccbSVinicius Costa Gomes 
10025a781ccbSVinicius Costa Gomes 	return nla_nest_end(msg, item);
10035a781ccbSVinicius Costa Gomes 
10045a781ccbSVinicius Costa Gomes nla_put_failure:
10055a781ccbSVinicius Costa Gomes 	nla_nest_cancel(msg, item);
10065a781ccbSVinicius Costa Gomes 	return -1;
10075a781ccbSVinicius Costa Gomes }
10085a781ccbSVinicius Costa Gomes 
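/* Dump a whole gate control list: base time, cycle time, cycle time
 * extension, and the nested list of schedule entries.
 */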
1009a3d43c0dSVinicius Costa Gomes static int dump_schedule(struct sk_buff *msg,
1010a3d43c0dSVinicius Costa Gomes 			 const struct sched_gate_list *root)
1011a3d43c0dSVinicius Costa Gomes {
1012a3d43c0dSVinicius Costa Gomes 	struct nlattr *entry_list;
1013a3d43c0dSVinicius Costa Gomes 	struct sched_entry *entry;
1014a3d43c0dSVinicius Costa Gomes 
1015a3d43c0dSVinicius Costa Gomes 	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
1016a3d43c0dSVinicius Costa Gomes 			root->base_time, TCA_TAPRIO_PAD))
1017a3d43c0dSVinicius Costa Gomes 		return -1;
1018a3d43c0dSVinicius Costa Gomes 
10196ca6a665SVinicius Costa Gomes 	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
10206ca6a665SVinicius Costa Gomes 			root->cycle_time, TCA_TAPRIO_PAD))
10216ca6a665SVinicius Costa Gomes 		return -1;
10226ca6a665SVinicius Costa Gomes 
1023c25031e9SVinicius Costa Gomes 	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
1024c25031e9SVinicius Costa Gomes 			root->cycle_time_extension, TCA_TAPRIO_PAD))
1025c25031e9SVinicius Costa Gomes 		return -1;
1026c25031e9SVinicius Costa Gomes 
1027a3d43c0dSVinicius Costa Gomes 	entry_list = nla_nest_start_noflag(msg,
1028a3d43c0dSVinicius Costa Gomes 					   TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
1029a3d43c0dSVinicius Costa Gomes 	if (!entry_list)
1030a3d43c0dSVinicius Costa Gomes 		goto error_nest;
1031a3d43c0dSVinicius Costa Gomes 
1032a3d43c0dSVinicius Costa Gomes 	list_for_each_entry(entry, &root->entries, list) {
1033a3d43c0dSVinicius Costa Gomes 		if (dump_entry(msg, entry) < 0)
1034a3d43c0dSVinicius Costa Gomes 			goto error_nest;
1035a3d43c0dSVinicius Costa Gomes 	}
1036a3d43c0dSVinicius Costa Gomes 
1037a3d43c0dSVinicius Costa Gomes 	nla_nest_end(msg, entry_list);
1038a3d43c0dSVinicius Costa Gomes 	return 0;
1039a3d43c0dSVinicius Costa Gomes 
1040a3d43c0dSVinicius Costa Gomes error_nest:
1041a3d43c0dSVinicius Costa Gomes 	nla_nest_cancel(msg, entry_list);
1042a3d43c0dSVinicius Costa Gomes 	return -1;
1043a3d43c0dSVinicius Costa Gomes }
1044a3d43c0dSVinicius Costa Gomes 
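/* Dump the qdisc configuration: the mqprio-style priority map, the
 * clockid, the operational schedule directly under TCA_OPTIONS, and the
 * admin (pending) schedule, if any, inside a nested
 * TCA_TAPRIO_ATTR_ADMIN_SCHED attribute.  oper_sched and admin_sched are
 * RCU pointers, so both are read under rcu_read_lock().
 */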
10455a781ccbSVinicius Costa Gomes static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
10465a781ccbSVinicius Costa Gomes {
10475a781ccbSVinicius Costa Gomes 	struct taprio_sched *q = qdisc_priv(sch);
10485a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
1049a3d43c0dSVinicius Costa Gomes 	struct sched_gate_list *oper, *admin;
10505a781ccbSVinicius Costa Gomes 	struct tc_mqprio_qopt opt = { 0 };
1051a3d43c0dSVinicius Costa Gomes 	struct nlattr *nest, *sched_nest;
10525a781ccbSVinicius Costa Gomes 	unsigned int i;
10535a781ccbSVinicius Costa Gomes 
1054a3d43c0dSVinicius Costa Gomes 	rcu_read_lock();
1055a3d43c0dSVinicius Costa Gomes 	oper = rcu_dereference(q->oper_sched);
1056a3d43c0dSVinicius Costa Gomes 	admin = rcu_dereference(q->admin_sched);
1057a3d43c0dSVinicius Costa Gomes 
10585a781ccbSVinicius Costa Gomes 	opt.num_tc = netdev_get_num_tc(dev);
10595a781ccbSVinicius Costa Gomes 	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
10605a781ccbSVinicius Costa Gomes 
10615a781ccbSVinicius Costa Gomes 	for (i = 0; i < netdev_get_num_tc(dev); i++) {
10625a781ccbSVinicius Costa Gomes 		opt.count[i] = dev->tc_to_txq[i].count;
10635a781ccbSVinicius Costa Gomes 		opt.offset[i] = dev->tc_to_txq[i].offset;
10645a781ccbSVinicius Costa Gomes 	}
10655a781ccbSVinicius Costa Gomes 
1066ae0be8deSMichal Kubecek 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
10675a781ccbSVinicius Costa Gomes 	if (!nest)
1068a3d43c0dSVinicius Costa Gomes 		goto start_error;
10695a781ccbSVinicius Costa Gomes 
10705a781ccbSVinicius Costa Gomes 	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
10715a781ccbSVinicius Costa Gomes 		goto options_error;
10725a781ccbSVinicius Costa Gomes 
10735a781ccbSVinicius Costa Gomes 	if (nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
10745a781ccbSVinicius Costa Gomes 		goto options_error;
10755a781ccbSVinicius Costa Gomes 
1076a3d43c0dSVinicius Costa Gomes 	if (oper && dump_schedule(skb, oper))
10775a781ccbSVinicius Costa Gomes 		goto options_error;
10785a781ccbSVinicius Costa Gomes 
1079a3d43c0dSVinicius Costa Gomes 	if (!admin)
1080a3d43c0dSVinicius Costa Gomes 		goto done;
10815a781ccbSVinicius Costa Gomes 
1082a3d43c0dSVinicius Costa Gomes 	sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
1083e4acf427SColin Ian King 	if (!sched_nest)
1084e4acf427SColin Ian King 		goto options_error;
1085a3d43c0dSVinicius Costa Gomes 
1086a3d43c0dSVinicius Costa Gomes 	if (dump_schedule(skb, admin))
1087a3d43c0dSVinicius Costa Gomes 		goto admin_error;
1088a3d43c0dSVinicius Costa Gomes 
1089a3d43c0dSVinicius Costa Gomes 	nla_nest_end(skb, sched_nest);
1090a3d43c0dSVinicius Costa Gomes 
1091a3d43c0dSVinicius Costa Gomes done:
1092a3d43c0dSVinicius Costa Gomes 	rcu_read_unlock();
10935a781ccbSVinicius Costa Gomes 
10945a781ccbSVinicius Costa Gomes 	return nla_nest_end(skb, nest);
10955a781ccbSVinicius Costa Gomes 
1096a3d43c0dSVinicius Costa Gomes admin_error:
1097a3d43c0dSVinicius Costa Gomes 	nla_nest_cancel(skb, sched_nest);
1098a3d43c0dSVinicius Costa Gomes 
10995a781ccbSVinicius Costa Gomes options_error:
11005a781ccbSVinicius Costa Gomes 	nla_nest_cancel(skb, nest);
1101a3d43c0dSVinicius Costa Gomes 
1102a3d43c0dSVinicius Costa Gomes start_error:
1103a3d43c0dSVinicius Costa Gomes 	rcu_read_unlock();
1104a3d43c0dSVinicius Costa Gomes 	return -ENOSPC;
11055a781ccbSVinicius Costa Gomes }
11065a781ccbSVinicius Costa Gomes 
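/* Return the qdisc currently attached (qdisc_sleeping) to the TX queue
 * backing class @cl, or NULL for an invalid class.
 */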
11075a781ccbSVinicius Costa Gomes static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
11085a781ccbSVinicius Costa Gomes {
11095a781ccbSVinicius Costa Gomes 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
11105a781ccbSVinicius Costa Gomes 
11115a781ccbSVinicius Costa Gomes 	if (!dev_queue)
11125a781ccbSVinicius Costa Gomes 		return NULL;
11135a781ccbSVinicius Costa Gomes 
11145a781ccbSVinicius Costa Gomes 	return dev_queue->qdisc_sleeping;
11155a781ccbSVinicius Costa Gomes }
11165a781ccbSVinicius Costa Gomes 
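/* Look up a class by ID: the minor number is the 1-based TX queue index,
 * returned unchanged when it maps to a real queue, 0 otherwise.
 */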
11175a781ccbSVinicius Costa Gomes static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
11185a781ccbSVinicius Costa Gomes {
11195a781ccbSVinicius Costa Gomes 	unsigned int ntx = TC_H_MIN(classid);
11205a781ccbSVinicius Costa Gomes 
11215a781ccbSVinicius Costa Gomes 	if (!taprio_queue_get(sch, ntx))
11225a781ccbSVinicius Costa Gomes 		return 0;
11235a781ccbSVinicius Costa Gomes 	return ntx;
11245a781ccbSVinicius Costa Gomes }
11255a781ccbSVinicius Costa Gomes 
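/* Report class information: every class is parented at the root, and
 * tcm_info carries the handle of the qdisc attached to the matching
 * TX queue.
 */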
11265a781ccbSVinicius Costa Gomes static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
11275a781ccbSVinicius Costa Gomes 			     struct sk_buff *skb, struct tcmsg *tcm)
11285a781ccbSVinicius Costa Gomes {
11295a781ccbSVinicius Costa Gomes 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
11305a781ccbSVinicius Costa Gomes 
11315a781ccbSVinicius Costa Gomes 	tcm->tcm_parent = TC_H_ROOT;
11325a781ccbSVinicius Costa Gomes 	tcm->tcm_handle |= TC_H_MIN(cl);
11335a781ccbSVinicius Costa Gomes 	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
11345a781ccbSVinicius Costa Gomes 
11355a781ccbSVinicius Costa Gomes 	return 0;
11365a781ccbSVinicius Costa Gomes }
11375a781ccbSVinicius Costa Gomes 
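/* Per-class statistics are simply those of the child qdisc attached to
 * the corresponding TX queue.
 */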
11385a781ccbSVinicius Costa Gomes static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
11395a781ccbSVinicius Costa Gomes 				   struct gnet_dump *d)
11405a781ccbSVinicius Costa Gomes 	__releases(d->lock)
11415a781ccbSVinicius Costa Gomes 	__acquires(d->lock)
11425a781ccbSVinicius Costa Gomes {
11435a781ccbSVinicius Costa Gomes 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
11445a781ccbSVinicius Costa Gomes 
11455a781ccbSVinicius Costa Gomes 	sch = dev_queue->qdisc_sleeping;
11465a781ccbSVinicius Costa Gomes 	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
11475dd431b6SPaolo Abeni 	    qdisc_qstats_copy(d, sch) < 0)
11485a781ccbSVinicius Costa Gomes 		return -1;
11495a781ccbSVinicius Costa Gomes 	return 0;
11505a781ccbSVinicius Costa Gomes }
11515a781ccbSVinicius Costa Gomes 
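/* Walk all classes: one class per TX queue, identified as queue index + 1,
 * stopping early if the callback reports an error.
 */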
11525a781ccbSVinicius Costa Gomes static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
11535a781ccbSVinicius Costa Gomes {
11545a781ccbSVinicius Costa Gomes 	struct net_device *dev = qdisc_dev(sch);
11555a781ccbSVinicius Costa Gomes 	unsigned long ntx;
11565a781ccbSVinicius Costa Gomes 
11575a781ccbSVinicius Costa Gomes 	if (arg->stop)
11585a781ccbSVinicius Costa Gomes 		return;
11595a781ccbSVinicius Costa Gomes 
11605a781ccbSVinicius Costa Gomes 	arg->count = arg->skip;
11615a781ccbSVinicius Costa Gomes 	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
11625a781ccbSVinicius Costa Gomes 		if (arg->fn(sch, ntx + 1, arg) < 0) {
11635a781ccbSVinicius Costa Gomes 			arg->stop = 1;
11645a781ccbSVinicius Costa Gomes 			break;
11655a781ccbSVinicius Costa Gomes 		}
11665a781ccbSVinicius Costa Gomes 		arg->count++;
11675a781ccbSVinicius Costa Gomes 	}
11685a781ccbSVinicius Costa Gomes }
11695a781ccbSVinicius Costa Gomes 
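/* Map the minor number of the requested parent to its TX queue; used by
 * the core when grafting a child qdisc onto one of our classes.
 */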
11705a781ccbSVinicius Costa Gomes static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
11715a781ccbSVinicius Costa Gomes 						struct tcmsg *tcm)
11725a781ccbSVinicius Costa Gomes {
11735a781ccbSVinicius Costa Gomes 	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
11745a781ccbSVinicius Costa Gomes }
11755a781ccbSVinicius Costa Gomes 
11765a781ccbSVinicius Costa Gomes static const struct Qdisc_class_ops taprio_class_ops = {
11775a781ccbSVinicius Costa Gomes 	.graft		= taprio_graft,
11785a781ccbSVinicius Costa Gomes 	.leaf		= taprio_leaf,
11795a781ccbSVinicius Costa Gomes 	.find		= taprio_find,
11805a781ccbSVinicius Costa Gomes 	.walk		= taprio_walk,
11815a781ccbSVinicius Costa Gomes 	.dump		= taprio_dump_class,
11825a781ccbSVinicius Costa Gomes 	.dump_stats	= taprio_dump_class_stats,
11835a781ccbSVinicius Costa Gomes 	.select_queue	= taprio_select_queue,
11845a781ccbSVinicius Costa Gomes };
11855a781ccbSVinicius Costa Gomes 
11865a781ccbSVinicius Costa Gomes static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
11875a781ccbSVinicius Costa Gomes 	.cl_ops		= &taprio_class_ops,
11885a781ccbSVinicius Costa Gomes 	.id		= "taprio",
11895a781ccbSVinicius Costa Gomes 	.priv_size	= sizeof(struct taprio_sched),
11905a781ccbSVinicius Costa Gomes 	.init		= taprio_init,
1191a3d43c0dSVinicius Costa Gomes 	.change		= taprio_change,
11925a781ccbSVinicius Costa Gomes 	.destroy	= taprio_destroy,
11935a781ccbSVinicius Costa Gomes 	.peek		= taprio_peek,
11945a781ccbSVinicius Costa Gomes 	.dequeue	= taprio_dequeue,
11955a781ccbSVinicius Costa Gomes 	.enqueue	= taprio_enqueue,
11965a781ccbSVinicius Costa Gomes 	.dump		= taprio_dump,
11975a781ccbSVinicius Costa Gomes 	.owner		= THIS_MODULE,
11985a781ccbSVinicius Costa Gomes };
11995a781ccbSVinicius Costa Gomes 
12007b9eba7bSLeandro Dorileo static struct notifier_block taprio_device_notifier = {
12017b9eba7bSLeandro Dorileo 	.notifier_call = taprio_dev_notifier,
12027b9eba7bSLeandro Dorileo };
12037b9eba7bSLeandro Dorileo 
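/* The netdevice notifier is registered first: taprio_dev_notifier updates
 * the per-qdisc picos_per_byte estimate when a device's link speed changes,
 * which is used to compute how many bytes fit in each gate interval.
 */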
12045a781ccbSVinicius Costa Gomes static int __init taprio_module_init(void)
12055a781ccbSVinicius Costa Gomes {
12067b9eba7bSLeandro Dorileo 	int err = register_netdevice_notifier(&taprio_device_notifier);
12077b9eba7bSLeandro Dorileo 
12087b9eba7bSLeandro Dorileo 	if (err)
12097b9eba7bSLeandro Dorileo 		return err;
12107b9eba7bSLeandro Dorileo 
12115a781ccbSVinicius Costa Gomes 	return register_qdisc(&taprio_qdisc_ops);
12125a781ccbSVinicius Costa Gomes }
12135a781ccbSVinicius Costa Gomes 
12145a781ccbSVinicius Costa Gomes static void __exit taprio_module_exit(void)
12155a781ccbSVinicius Costa Gomes {
12165a781ccbSVinicius Costa Gomes 	unregister_qdisc(&taprio_qdisc_ops);
12177b9eba7bSLeandro Dorileo 	unregister_netdevice_notifier(&taprio_device_notifier);
12185a781ccbSVinicius Costa Gomes }
12195a781ccbSVinicius Costa Gomes 
12205a781ccbSVinicius Costa Gomes module_init(taprio_module_init);
12215a781ccbSVinicius Costa Gomes module_exit(taprio_module_exit);
12225a781ccbSVinicius Costa Gomes MODULE_LICENSE("GPL");
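/* Illustrative configuration (mirrors the tc-taprio(8) example; "eth0" is a
 * placeholder interface and exact option spelling depends on the iproute2
 * version in use):
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *         num_tc 3 \
 *         map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *         queues 1@0 1@1 2@2 \
 *         base-time 1000000000 \
 *         sched-entry S 01 300000 \
 *         sched-entry S 02 300000 \
 *         sched-entry S 04 400000 \
 *         clockid CLOCK_TAI
 */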