/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/if_vlan.h>
#include <net/sch_generic.h>
#include <uapi/linux/pkt_sched.h>

#define DEFAULT_TX_QUEUE_LEN	1000

struct qdisc_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};

#define QDISC_ALIGNTO		64
#define QDISC_ALIGN(len)	(((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))

static inline void *qdisc_priv(struct Qdisc *q)
{
	return (char *) q + QDISC_ALIGN(sizeof(struct Qdisc));
}
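
/* Illustrative sketch (the "foo" names are hypothetical): the private area
 * returned by qdisc_priv() is the ops->priv_size bytes placed at the first
 * 64-byte boundary past struct Qdisc (QDISC_ALIGN() rounds up to a multiple
 * of QDISC_ALIGNTO, e.g. QDISC_ALIGN(100) == 128).  A scheduler with
 * .priv_size = sizeof(struct foo_sched_data) would typically do:
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		q->limit = qdisc_dev(sch)->tx_queue_len;
 *		return 0;
 *	}
 */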

/*
   Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth

   A normal IP packet is ~512 bytes, hence:

   0.5 Kbyte / 1 Mbyte/sec = 0.5 msec, so we need a 50 usec timer for
   10 Mbit ethernet.

   10 msec resolution -> < 50 Kbit/sec.

   The result: a [34]86 is not a good choice for a QoS router :-(

   Things are not so bad, though, because in the most critical places
   we can use an artificial clock evaluated by integrating the network
   data flow.
 */
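
/* Worked example of the bound above: rearranging
 * "resolution < 0.1 * packet_size / bandwidth" gives
 * bandwidth < packet_size / (10 * resolution).  With 512-byte packets and a
 * 10 msec timer that is 512 B / 0.1 s = 5120 B/s, i.e. roughly 40 Kbit/sec,
 * which is the "< 50 Kbit/sec" figure quoted above.
 */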

typedef u64	psched_time_t;
typedef long	psched_tdiff_t;

/* Avoid doing 64 bit divide */
#define PSCHED_SHIFT			6
#define PSCHED_TICKS2NS(x)		((s64)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)		((x) >> PSCHED_SHIFT)

#define PSCHED_TICKS_PER_SEC		PSCHED_NS2TICKS(NSEC_PER_SEC)
#define PSCHED_PASTPERFECT		0
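
/* Worked example: with PSCHED_SHIFT == 6 one psched tick is 2^6 = 64 ns,
 * so PSCHED_TICKS_PER_SEC is 10^9 >> 6 = 15,625,000 and
 * PSCHED_TICKS2NS(1000) == 64,000 ns.  Converting with a shift instead of
 * a multiply/divide keeps 64-bit division off this hot path.
 */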

static inline psched_time_t psched_get_time(void)
{
	return PSCHED_NS2TICKS(ktime_get_ns());
}

static inline psched_tdiff_t
psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
{
	return min(tv1 - tv2, bound);
}

struct qdisc_watchdog {
	u64		last_expires;
	struct hrtimer	timer;
	struct Qdisc	*qdisc;
};

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);

static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
					   psched_time_t expires)
{
	qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
}

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
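
/* Illustrative usage sketch (the "foo" names are hypothetical): a shaping
 * qdisc typically embeds a struct qdisc_watchdog in its private data,
 * initializes it in ->init, re-arms it from ->dequeue when the head packet
 * may not be sent yet, and cancels it in ->destroy:
 *
 *	qdisc_watchdog_init(&q->watchdog, sch);           (in foo_init)
 *	qdisc_watchdog_schedule_ns(&q->watchdog, when);   (in foo_dequeue)
 *	qdisc_watchdog_cancel(&q->watchdog);              (in foo_destroy)
 *
 * When the hrtimer expires, the watchdog reschedules the owning qdisc so
 * that its dequeue routine runs again.
 */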

extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;

int fifo_set_limit(struct Qdisc *q, unsigned int limit);
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
			       unsigned int limit);

int register_qdisc(struct Qdisc_ops *qops);
int unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);
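
/* Illustrative sketch (the "foo" names are hypothetical): a scheduler module
 * registers its Qdisc_ops once at load time and unregisters it on unload:
 *
 *	static int __init foo_module_init(void)
 *	{
 *		return register_qdisc(&foo_qdisc_ops);
 *	}
 *
 *	static void __exit foo_module_exit(void)
 *	{
 *		unregister_qdisc(&foo_qdisc_ops);
 *	}
 *
 * register_qdisc() returns 0 on success or a negative errno such as -EEXIST
 * when a scheduler with the same id is already registered.
 */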

void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab);
void qdisc_put_rtab(struct qdisc_rate_table *tab);
void qdisc_put_stab(struct qdisc_size_table *tab);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		    struct net_device *dev, struct netdev_queue *txq,
		    spinlock_t *root_lock, bool validate);

void __qdisc_run(struct Qdisc *q);
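
/* qdisc_run_begin() succeeds for only one caller at a time, so if this qdisc
 * is already being run on another CPU the call below simply returns and the
 * existing runner drains the queue; __qdisc_run() releases the running state
 * with qdisc_run_end() once it has used up its quota or emptied the queue.
 */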

static inline void qdisc_run(struct Qdisc *q)
{
	if (qdisc_run_begin(q))
		__qdisc_run(q);
}

static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
{
	/* We need to take extra care in case the skb came via
	 * vlan accelerated path. In that case, use skb->vlan_proto
	 * as the original vlan header was already stripped.
	 */
	if (skb_vlan_tag_present(skb))
		return skb->vlan_proto;
	return skb->protocol;
}

/* Calculate the maximal size of a packet seen by the hard_start_xmit
   routine of this device.
 */
static inline unsigned int psched_mtu(const struct net_device *dev)
{
	return dev->mtu + dev->hard_header_len;
}
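
/* Worked example: a plain Ethernet device with dev->mtu == 1500 and
 * dev->hard_header_len == ETH_HLEN (14) gives psched_mtu() == 1514 bytes.
 */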

static inline bool is_classid_clsact_ingress(u32 classid)
{
	/* This also returns true for ingress qdisc */
	return TC_H_MAJ(classid) == TC_H_MAJ(TC_H_CLSACT) &&
	       TC_H_MIN(classid) != TC_H_MIN(TC_H_MIN_EGRESS);
}

static inline bool is_classid_clsact_egress(u32 classid)
{
	return TC_H_MAJ(classid) == TC_H_MAJ(TC_H_CLSACT) &&
	       TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_EGRESS);
}
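
/* Illustrative values, taken from uapi/linux/pkt_sched.h at this revision:
 * TC_H_CLSACT shares its major number with TC_H_INGRESS, TC_H_MIN_INGRESS is
 * 0xFFF2 and TC_H_MIN_EGRESS is 0xFFF3.  So classid ffff:fff2 matches the
 * ingress helper above, ffff:fff3 matches the egress helper, and any other
 * minor under the ffff: major (including the legacy ingress qdisc's) is also
 * treated as ingress.
 */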

#endif