xref: /openbmc/linux/net/sched/sch_ingress.c (revision 4da722ca19f30f7db250db808d1ab1703607a932)
/* net/sched/sch_ingress.c - Ingress and clsact qdisc
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Jamal Hadi Salim 1999
 */
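/* Example usage from userspace (iproute2); the device name and the filters
 * below are illustrative only:
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 parent ffff: matchall action drop
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress matchall action pass
 *   tc filter add dev eth0 egress matchall action pass
 */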

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

struct ingress_sched_data {
	struct tcf_block *block;
};

static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

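/* The ingress qdisc exposes a single pseudo class: map any minor to a
 * non-zero handle so class lookups always succeed.
 */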
static unsigned long ingress_get(struct Qdisc *sch, u32 classid)
{
	return TC_H_MIN(classid) + 1;
}

static bool ingress_cl_offload(u32 classid)
{
	return true;
}

static unsigned long ingress_bind_filter(struct Qdisc *sch,
					 unsigned long parent, u32 classid)
{
	return ingress_get(sch, classid);
}

static void ingress_put(struct Qdisc *sch, unsigned long cl)
{
}

static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
}

static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	return q->block;
}

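/* Attach the filter block to the device's ingress classifier list and take
 * a reference on the static key that enables the ingress hook in the core
 * receive path.  TCQ_F_CPUSTATS selects per-CPU byte/packet counters, since
 * classification here runs without the qdisc root lock.
 */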
static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct ingress_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err;

	err = tcf_block_get(&q->block, &dev->ingress_cl_list);
	if (err)
		return err;

	net_inc_ingress_queue();
	sch->flags |= TCQ_F_CPUSTATS;

	return 0;
}

static void ingress_destroy(struct Qdisc *sch)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	net_dec_ingress_queue();
}

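/* Neither ingress nor clsact takes any options; emit an empty TCA_OPTIONS
 * nest so the netlink dump stays well formed.
 */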
static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static const struct Qdisc_class_ops ingress_class_ops = {
	.leaf		=	ingress_leaf,
	.get		=	ingress_get,
	.put		=	ingress_put,
	.walk		=	ingress_walk,
	.tcf_block	=	ingress_tcf_block,
	.tcf_cl_offload	=	ingress_cl_offload,
	.bind_tcf	=	ingress_bind_filter,
	.unbind_tcf	=	ingress_put,
};

static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
	.cl_ops		=	&ingress_class_ops,
	.id		=	"ingress",
	.priv_size	=	sizeof(struct ingress_sched_data),
	.init		=	ingress_init,
	.destroy	=	ingress_destroy,
	.dump		=	ingress_dump,
	.owner		=	THIS_MODULE,
};

struct clsact_sched_data {
	struct tcf_block *ingress_block;
	struct tcf_block *egress_block;
};

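/* clsact recognises exactly two pseudo classes, TC_H_MIN_INGRESS and
 * TC_H_MIN_EGRESS, which select the direction a filter is attached to.
 */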
static unsigned long clsact_get(struct Qdisc *sch, u32 classid)
{
	switch (TC_H_MIN(classid)) {
	case TC_H_MIN(TC_H_MIN_INGRESS):
	case TC_H_MIN(TC_H_MIN_EGRESS):
		return TC_H_MIN(classid);
	default:
		return 0;
	}
}

static bool clsact_cl_offload(u32 classid)
{
	return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
}

static unsigned long clsact_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	return clsact_get(sch, classid);
}

static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	switch (cl) {
	case TC_H_MIN(TC_H_MIN_INGRESS):
		return q->ingress_block;
	case TC_H_MIN(TC_H_MIN_EGRESS):
		return q->egress_block;
	default:
		return NULL;
	}
}

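/* Like ingress_init(), but with a separate filter block per direction and
 * with both the ingress and egress hooks in the core stack enabled.
 */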
static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct clsact_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err;

	err = tcf_block_get(&q->ingress_block, &dev->ingress_cl_list);
	if (err)
		return err;

	err = tcf_block_get(&q->egress_block, &dev->egress_cl_list);
	if (err)
		return err;

	net_inc_ingress_queue();
	net_inc_egress_queue();

	sch->flags |= TCQ_F_CPUSTATS;

	return 0;
}

static void clsact_destroy(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->egress_block);
	tcf_block_put(q->ingress_block);

	net_dec_ingress_queue();
	net_dec_egress_queue();
}

static const struct Qdisc_class_ops clsact_class_ops = {
	.leaf		=	ingress_leaf,
	.get		=	clsact_get,
	.put		=	ingress_put,
	.walk		=	ingress_walk,
	.tcf_block	=	clsact_tcf_block,
	.tcf_cl_offload	=	clsact_cl_offload,
	.bind_tcf	=	clsact_bind_filter,
	.unbind_tcf	=	ingress_put,
};

static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
	.cl_ops		=	&clsact_class_ops,
	.id		=	"clsact",
	.priv_size	=	sizeof(struct clsact_sched_data),
	.init		=	clsact_init,
	.destroy	=	clsact_destroy,
	.dump		=	ingress_dump,
	.owner		=	THIS_MODULE,
};

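/* Both qdiscs live in this module: register ingress first and roll it back
 * if registering clsact fails.
 */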
static int __init ingress_module_init(void)
{
	int ret;

	ret = register_qdisc(&ingress_qdisc_ops);
	if (!ret) {
		ret = register_qdisc(&clsact_qdisc_ops);
		if (ret)
			unregister_qdisc(&ingress_qdisc_ops);
	}

	return ret;
}

static void __exit ingress_module_exit(void)
{
	unregister_qdisc(&ingress_qdisc_ops);
	unregister_qdisc(&clsact_qdisc_ops);
}

module_init(ingress_module_init);
module_exit(ingress_module_exit);

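/* The module builds as sch_ingress; the alias lets a request for the clsact
 * qdisc autoload it as well.
 */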
MODULE_ALIAS("sch_clsact");
MODULE_LICENSE("GPL");