/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <net/sch_generic.h>
#include <net/net_namespace.h>
#include <uapi/linux/pkt_sched.h>

#define DEFAULT_TX_QUEUE_LEN	1000

struct qdisc_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};

#define QDISC_ALIGNTO		64
#define QDISC_ALIGN(len)	(((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))

static inline void *qdisc_priv(struct Qdisc *q)
{
	return (char *) q + QDISC_ALIGN(sizeof(struct Qdisc));
}
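
/*
 * qdisc_alloc() lays out the per-qdisc private area (of size
 * Qdisc_ops.priv_size) directly after the struct Qdisc itself, aligned to
 * QDISC_ALIGNTO, so qdisc_priv() is pure pointer arithmetic.  Hedged
 * sketch of the usual access pattern ("my_" names are hypothetical):
 *
 *	struct my_sched_data {
 *		u32 limit;
 *	};
 *
 *	static int my_init(struct Qdisc *sch, struct nlattr *opt,
 *			   struct netlink_ext_ack *extack)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *
 *		q->limit = qdisc_dev(sch)->tx_queue_len;
 *		return 0;
 *	}
 *
 * The matching Qdisc_ops would set .priv_size =
 * sizeof(struct my_sched_data) so qdisc_alloc() reserves the space.
 */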

/*
   Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth.

   A normal IP packet is ~512 bytes, hence:

   0.5 Kbyte / 1 Mbyte/sec = 0.5 msec, so we need a 50 usec timer for
   10 Mbit ethernet.

   10 msec resolution -> < 50 Kbit/sec.

   The result: a [34]86 is not a good choice for a QoS router :-(

   Things are not so bad, though, because we may use an artificial
   clock evaluated by integration of network data flow
   in the most critical places.
 */

typedef u64	psched_time_t;
typedef long	psched_tdiff_t;

/* Avoid doing 64 bit divide */
#define PSCHED_SHIFT			6
#define PSCHED_TICKS2NS(x)		((s64)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)		((x) >> PSCHED_SHIFT)

#define PSCHED_TICKS_PER_SEC		PSCHED_NS2TICKS(NSEC_PER_SEC)
#define PSCHED_PASTPERFECT		0

static inline psched_time_t psched_get_time(void)
{
	return PSCHED_NS2TICKS(ktime_get_ns());
}
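
/*
 * With PSCHED_SHIFT == 6, one psched tick is 2^6 = 64 ns, so tick/ns
 * conversion is a shift instead of a 64-bit divide.  Worked example:
 *
 *	PSCHED_TICKS_PER_SEC = 1000000000 >> 6 = 15625000 ticks/sec
 *	PSCHED_TICKS2NS(15625000) = 15625000 << 6 = 1000000000 ns
 *
 * The round trip is exact for multiples of 64 ns; PSCHED_NS2TICKS()
 * truncates the low six bits (at most 63 ns) otherwise.
 */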

static inline psched_tdiff_t
psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
{
	return min(tv1 - tv2, bound);
}

struct qdisc_watchdog {
	u64		last_expires;
	struct hrtimer	timer;
	struct Qdisc	*qdisc;
};

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
				 clockid_t clockid);
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);

static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
					   psched_time_t expires)
{
	qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
}

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
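
/*
 * Typical watchdog life cycle in a rate-limiting qdisc (a hedged sketch;
 * the "my_" names and next_send_time are hypothetical): initialize the
 * watchdog once, arm it from ->dequeue() when the next packet is not yet
 * due, and cancel it on reset or destroy.
 *
 *	static int my_init(struct Qdisc *sch, struct nlattr *opt,
 *			   struct netlink_ext_ack *extack)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *
 *		qdisc_watchdog_init(&q->watchdog, sch);
 *		return 0;
 *	}
 *
 *	static struct sk_buff *my_dequeue(struct Qdisc *sch)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *		...
 *		if (next_send_time > ktime_get_ns()) {
 *			qdisc_watchdog_schedule_ns(&q->watchdog,
 *						   next_send_time);
 *			return NULL;
 *		}
 *		...
 *	}
 *
 *	static void my_reset(struct Qdisc *sch)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *
 *		qdisc_watchdog_cancel(&q->watchdog);
 *	}
 *
 * When the hrtimer fires, the watchdog reschedules the attached qdisc so
 * dequeue runs again.
 */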

extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;

int fifo_set_limit(struct Qdisc *q, unsigned int limit);
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
			       unsigned int limit,
			       struct netlink_ext_ack *extack);

int register_qdisc(struct Qdisc_ops *qops);
int unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);
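
/*
 * A qdisc module registers its Qdisc_ops at load time and unregisters on
 * unload.  Hedged sketch only; "my_qdisc_ops" and the "my_" callbacks are
 * hypothetical names, not part of this header:
 *
 *	static struct Qdisc_ops my_qdisc_ops __read_mostly = {
 *		.id		= "myqdisc",
 *		.priv_size	= sizeof(struct my_sched_data),
 *		.enqueue	= my_enqueue,
 *		.dequeue	= my_dequeue,
 *		.init		= my_init,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init my_module_init(void)
 *	{
 *		return register_qdisc(&my_qdisc_ops);
 *	}
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		unregister_qdisc(&my_qdisc_ops);
 *	}
 *
 *	module_init(my_module_init);
 *	module_exit(my_module_exit);
 */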

void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab,
					struct netlink_ext_ack *extack);
void qdisc_put_rtab(struct qdisc_rate_table *tab);
void qdisc_put_stab(struct qdisc_size_table *tab);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate);

void __qdisc_run(struct Qdisc *q);

static inline void qdisc_run(struct Qdisc *q)
{
	if (qdisc_run_begin(q)) {
		/* NOLOCK qdisc must check 'state' under the qdisc seqlock
		 * to avoid racing with dev_qdisc_reset()
		 */
		if (!(q->flags & TCQ_F_NOLOCK) ||
		    likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			__qdisc_run(q);
		qdisc_run_end(q);
	}
}

static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
{
	/* We need to take extra care in case the skb came via the
	 * VLAN accelerated path. In that case, use skb->vlan_proto,
	 * as the original VLAN header was already stripped.
	 */
	if (skb_vlan_tag_present(skb))
		return skb->vlan_proto;
	return skb->protocol;
}
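
/*
 * Illustrative check (a hedged sketch, not a call site in this header):
 * on the VLAN-accelerated receive path the tag is carried in skb metadata
 * and skb->protocol holds the encapsulated protocol, so only
 * tc_skb_protocol() reports the frame as VLAN in both the accelerated and
 * non-accelerated cases:
 *
 *	if (tc_skb_protocol(skb) == htons(ETH_P_8021Q))
 *		handle_vlan_frame(skb);
 *
 * where handle_vlan_frame() is a hypothetical helper.
 */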

/* Calculate the maximal packet size seen by the hard_start_xmit
   routine of this device.
 */
static inline unsigned int psched_mtu(const struct net_device *dev)
{
	return dev->mtu + dev->hard_header_len;
}
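
/*
 * Worked example: for a standard Ethernet device, dev->mtu == 1500 and
 * dev->hard_header_len == ETH_HLEN (14), so psched_mtu() returns 1514.
 */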

static inline struct net *qdisc_net(struct Qdisc *q)
{
	return dev_net(q->dev_queue->dev);
}

struct tc_cbs_qopt_offload {
	u8 enable;
	s32 queue;
	s32 hicredit;	/* maximum credit, bytes */
	s32 locredit;	/* minimum credit, bytes (typically negative) */
	s32 idleslope;	/* kilobits per second */
	s32 sendslope;	/* kilobits per second (negative) */
};
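
/*
 * Drivers receive this structure through .ndo_setup_tc() with type
 * TC_SETUP_QDISC_CBS when sch_cbs is configured with offload enabled.
 * Hedged sketch of a driver hook; the "my_" names are hypothetical:
 *
 *	static int my_setup_tc(struct net_device *dev,
 *			       enum tc_setup_type type, void *type_data)
 *	{
 *		struct tc_cbs_qopt_offload *cbs = type_data;
 *
 *		if (type != TC_SETUP_QDISC_CBS)
 *			return -EOPNOTSUPP;
 *
 *		if (cbs->enable)
 *			return my_hw_cbs_on(dev, cbs->queue,
 *					    cbs->idleslope, cbs->sendslope,
 *					    cbs->hicredit, cbs->locredit);
 *		return my_hw_cbs_off(dev, cbs->queue);
 *	}
 */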

struct tc_etf_qopt_offload {
	u8 enable;
	s32 queue;
};

struct tc_taprio_sched_entry {
	u8 command; /* TC_TAPRIO_CMD_* */

	/* The gate_mask on the offloading side refers to traffic classes:
	 * bit N set means traffic class N may transmit during this entry.
	 */
	u32 gate_mask;
	u32 interval; /* nanoseconds */
};

struct tc_taprio_qopt_offload {
	u8 enable;
	ktime_t base_time;
	u64 cycle_time;
	u64 cycle_time_extension;

	size_t num_entries;
	struct tc_taprio_sched_entry entries[];
};

/* Reference counting */
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload);
void taprio_offload_free(struct tc_taprio_qopt_offload *offload);
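
/*
 * The offload object is reference counted so a driver can keep the
 * schedule alive after taprio hands it over.  Hedged sketch of the
 * expected driver pattern ("priv" and its taprio field are hypothetical):
 *
 *	In .ndo_setup_tc(TC_SETUP_QDISC_TAPRIO):
 *
 *		priv->taprio = taprio_offload_get(offload);
 *
 *	Later, when the hardware schedule is torn down:
 *
 *		taprio_offload_free(priv->taprio);
 *		priv->taprio = NULL;
 *
 * taprio_offload_free() drops one reference and frees the object when the
 * last reference goes away.
 */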

#endif