1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * net/sched/sch_fifo.c The simplest FIFO queue.
4 *
5 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6 */
7
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/types.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/skbuff.h>
14 #include <net/pkt_sched.h>
15 #include <net/pkt_cls.h>
16
17 /* 1 band FIFO pseudo-"scheduler" */
18
bfifo_enqueue(struct sk_buff * skb,struct Qdisc * sch,struct sk_buff ** to_free)19 static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
20 struct sk_buff **to_free)
21 {
22 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
23 return qdisc_enqueue_tail(skb, sch);
24
25 return qdisc_drop(skb, sch, to_free);
26 }
27
pfifo_enqueue(struct sk_buff * skb,struct Qdisc * sch,struct sk_buff ** to_free)28 static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
29 struct sk_buff **to_free)
30 {
31 if (likely(sch->q.qlen < sch->limit))
32 return qdisc_enqueue_tail(skb, sch);
33
34 return qdisc_drop(skb, sch, to_free);
35 }
36
/* Head-drop FIFO enqueue: when the queue is full, drop the OLDEST packet
 * (the head) to make room for the new one, rather than dropping the new
 * packet as plain pfifo does.
 */
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			      struct sk_buff **to_free)
{
	unsigned int prev_backlog;

	/* A zero limit can never admit the new packet, and the head-drop
	 * path below assumes there is a head to remove.
	 */
	if (unlikely(sch->limit == 0))
		return qdisc_drop(skb, sch, to_free);

	if (likely(sch->q.qlen < sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	prev_backlog = sch->qstats.backlog;
	/* queue full, remove one skb to fulfill the limit */
	__qdisc_queue_drop_head(sch, &sch->q, to_free);
	qdisc_qstats_drop(sch);
	qdisc_enqueue_tail(skb, sch);

	/* qlen is unchanged (one dropped, one enqueued), but the backlog
	 * changed by the size difference between the two packets; report
	 * that delta up the qdisc tree.
	 */
	qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
	return NET_XMIT_CN;
}
57
fifo_offload_init(struct Qdisc * sch)58 static void fifo_offload_init(struct Qdisc *sch)
59 {
60 struct net_device *dev = qdisc_dev(sch);
61 struct tc_fifo_qopt_offload qopt;
62
63 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
64 return;
65
66 qopt.command = TC_FIFO_REPLACE;
67 qopt.handle = sch->handle;
68 qopt.parent = sch->parent;
69 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
70 }
71
fifo_offload_destroy(struct Qdisc * sch)72 static void fifo_offload_destroy(struct Qdisc *sch)
73 {
74 struct net_device *dev = qdisc_dev(sch);
75 struct tc_fifo_qopt_offload qopt;
76
77 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
78 return;
79
80 qopt.command = TC_FIFO_DESTROY;
81 qopt.handle = sch->handle;
82 qopt.parent = sch->parent;
83 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
84 }
85
fifo_offload_dump(struct Qdisc * sch)86 static int fifo_offload_dump(struct Qdisc *sch)
87 {
88 struct tc_fifo_qopt_offload qopt;
89
90 qopt.command = TC_FIFO_STATS;
91 qopt.handle = sch->handle;
92 qopt.parent = sch->parent;
93 qopt.stats.bstats = &sch->bstats;
94 qopt.stats.qstats = &sch->qstats;
95
96 return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_FIFO, &qopt);
97 }
98
__fifo_init(struct Qdisc * sch,struct nlattr * opt,struct netlink_ext_ack * extack)99 static int __fifo_init(struct Qdisc *sch, struct nlattr *opt,
100 struct netlink_ext_ack *extack)
101 {
102 bool bypass;
103 bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
104
105 if (opt == NULL) {
106 u32 limit = qdisc_dev(sch)->tx_queue_len;
107
108 if (is_bfifo)
109 limit *= psched_mtu(qdisc_dev(sch));
110
111 sch->limit = limit;
112 } else {
113 struct tc_fifo_qopt *ctl = nla_data(opt);
114
115 if (nla_len(opt) < sizeof(*ctl))
116 return -EINVAL;
117
118 sch->limit = ctl->limit;
119 }
120
121 if (is_bfifo)
122 bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
123 else
124 bypass = sch->limit >= 1;
125
126 if (bypass)
127 sch->flags |= TCQ_F_CAN_BYPASS;
128 else
129 sch->flags &= ~TCQ_F_CAN_BYPASS;
130
131 return 0;
132 }
133
/* init/change for pfifo and bfifo: common setup, then (re)program the
 * hardware offload on success.
 */
static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	int err = __fifo_init(sch, opt, extack);

	if (!err)
		fifo_offload_init(sch);
	return err;
}
146
/* init/change for pfifo_head_drop: common setup only, no offload hooks. */
static int fifo_hd_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	return __fifo_init(sch, opt, extack);
}
152
/* destroy for pfifo and bfifo: release any hardware offload state. */
static void fifo_destroy(struct Qdisc *sch)
{
	fifo_offload_destroy(sch);
}
157
__fifo_dump(struct Qdisc * sch,struct sk_buff * skb)158 static int __fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
159 {
160 struct tc_fifo_qopt opt = { .limit = sch->limit };
161
162 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
163 goto nla_put_failure;
164 return skb->len;
165
166 nla_put_failure:
167 return -1;
168 }
169
/* dump for pfifo/bfifo: refresh stats from hardware first, then emit
 * the configuration.
 */
static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	int err = fifo_offload_dump(sch);

	return err ? err : __fifo_dump(sch, skb);
}
180
/* dump for pfifo_head_drop: configuration only, no offload stats. */
static int fifo_hd_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	return __fifo_dump(sch, skb);
}
185
/* "pfifo": FIFO limited by packet count; tail-drop when full. */
struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
	.id		=	"pfifo",
	.priv_size	=	0,
	.enqueue	=	pfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.destroy	=	fifo_destroy,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_qdisc_ops);
200
/* "bfifo": FIFO limited by total byte backlog; tail-drop when full. */
struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
	.id		=	"bfifo",
	.priv_size	=	0,
	.enqueue	=	bfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.destroy	=	fifo_destroy,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(bfifo_qdisc_ops);
215
/* "pfifo_head_drop": packet-count FIFO that drops the OLDEST packet when
 * full. No .destroy and no offload — uses the hd_* init/dump variants.
 */
struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
	.id		=	"pfifo_head_drop",
	.priv_size	=	0,
	.enqueue	=	pfifo_tail_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_hd_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_hd_init,
	.dump		=	fifo_hd_dump,
	.owner		=	THIS_MODULE,
};
228
229 /* Pass size change message down to embedded FIFO */
fifo_set_limit(struct Qdisc * q,unsigned int limit)230 int fifo_set_limit(struct Qdisc *q, unsigned int limit)
231 {
232 struct nlattr *nla;
233 int ret = -ENOMEM;
234
235 /* Hack to avoid sending change message to non-FIFO */
236 if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
237 return 0;
238
239 if (!q->ops->change)
240 return 0;
241
242 nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
243 if (nla) {
244 nla->nla_type = RTM_NEWQDISC;
245 nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
246 ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
247
248 ret = q->ops->change(q, nla, NULL);
249 kfree(nla);
250 }
251 return ret;
252 }
253 EXPORT_SYMBOL(fifo_set_limit);
254
fifo_create_dflt(struct Qdisc * sch,struct Qdisc_ops * ops,unsigned int limit,struct netlink_ext_ack * extack)255 struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
256 unsigned int limit,
257 struct netlink_ext_ack *extack)
258 {
259 struct Qdisc *q;
260 int err = -ENOMEM;
261
262 q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
263 extack);
264 if (q) {
265 err = fifo_set_limit(q, limit);
266 if (err < 0) {
267 qdisc_put(q);
268 q = NULL;
269 }
270 }
271
272 return q ? : ERR_PTR(err);
273 }
274 EXPORT_SYMBOL(fifo_create_dflt);
275