xref: /openbmc/linux/include/net/fq_impl.h (revision 0edbfea5)
/*
 * Copyright (c) 2016 Qualcomm Atheros, Inc
 *
 * GPL v2
 *
 * Based on net/sched/sch_fq_codel.c
 */
#ifndef __NET_SCHED_FQ_IMPL_H
#define __NET_SCHED_FQ_IMPL_H

#include <net/fq.h>

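/*
 * Overview: this header implements the core of a fq_codel-style fair
 * queueing scheme.  Packets are hashed into per-flow queues (struct
 * fq_flow), flows are grouped into tins (struct fq_tin), and each tin
 * services its flows with a deficit round-robin (DRR) scheduler using
 * the "new flows"/"old flows" split from net/sched/sch_fq_codel.c.
 * fq->backlogs keeps all backlogged flows sorted by descending backlog
 * so the fattest flow is found quickly when the global limit is hit.
 */
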
/* Functions that are embedded into the includer. */

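/* Dequeue one skb from @flow, update the backlog accounting on the
 * flow, its tin and the fq, and re-sort (or unlink) the flow on the
 * descending fq->backlogs list to match its shrunken backlog.
 */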
static struct sk_buff *fq_flow_dequeue(struct fq *fq,
				       struct fq_flow *flow)
{
	struct fq_tin *tin = flow->tin;
	struct fq_flow *i;
	struct sk_buff *skb;

	lockdep_assert_held(&fq->lock);

	skb = __skb_dequeue(&flow->queue);
	if (!skb)
		return NULL;

	tin->backlog_bytes -= skb->len;
	tin->backlog_packets--;
	flow->backlog -= skb->len;
	fq->backlog--;

	if (flow->backlog == 0) {
		list_del_init(&flow->backlogchain);
	} else {
		i = flow;

		list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
			if (i->backlog < flow->backlog)
				break;

		list_move_tail(&flow->backlogchain,
			       &i->backlogchain);
	}

	return skb;
}

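/* DRR scheduler: service flows on the new_flows list first, refill a
 * flow's deficit with one quantum and rotate it to old_flows whenever
 * the deficit runs out, and drop a flow from the rotation once it is
 * empty.  An empty flow on new_flows is moved to old_flows rather than
 * removed, forcing a pass through the old flows before the flow is
 * retired.
 */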
static struct sk_buff *fq_tin_dequeue(struct fq *fq,
				      struct fq_tin *tin,
				      fq_tin_dequeue_t dequeue_func)
{
	struct fq_flow *flow;
	struct list_head *head;
	struct sk_buff *skb;

	lockdep_assert_held(&fq->lock);

begin:
	head = &tin->new_flows;
	if (list_empty(head)) {
		head = &tin->old_flows;
		if (list_empty(head))
			return NULL;
	}

	flow = list_first_entry(head, struct fq_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += fq->quantum;
		list_move_tail(&flow->flowchain,
			       &tin->old_flows);
		goto begin;
	}

	skb = dequeue_func(fq, tin, flow);
	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &tin->new_flows) &&
		    !list_empty(&tin->old_flows)) {
			list_move_tail(&flow->flowchain, &tin->old_flows);
		} else {
			list_del_init(&flow->flowchain);
			flow->tin = NULL;
		}
		goto begin;
	}

	flow->deficit -= skb->len;
	tin->tx_bytes += skb->len;
	tin->tx_packets++;

	return skb;
}

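/* Map @skb to a flow by hashing it (with a randomly seeded
 * perturbation to defeat deliberate hash collisions) and scaling the
 * hash into the flows array.  If the slot is already owned by a
 * different tin, fall back to the includer-provided default flow and
 * count a collision.
 */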
static struct fq_flow *fq_flow_classify(struct fq *fq,
					struct fq_tin *tin,
					struct sk_buff *skb,
					fq_flow_get_default_t get_default_func)
{
	struct fq_flow *flow;
	u32 hash;
	u32 idx;

	lockdep_assert_held(&fq->lock);

	hash = skb_get_hash_perturb(skb, fq->perturbation);
	idx = reciprocal_scale(hash, fq->flows_cnt);
	flow = &fq->flows[idx];

	if (flow->tin && flow->tin != tin) {
		flow = get_default_func(fq, tin, idx, skb);
		tin->collisions++;
		fq->collisions++;
	}

	if (!flow->tin)
		tin->flows++;

	return flow;
}

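/* Re-sort @flow on the descending fq->backlogs list after its backlog
 * has grown, walking backwards from its current position until a
 * flow with a bigger backlog is found.
 */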
static void fq_recalc_backlog(struct fq *fq,
			      struct fq_tin *tin,
			      struct fq_flow *flow)
{
	struct fq_flow *i;

	if (list_empty(&flow->backlogchain))
		list_add_tail(&flow->backlogchain, &fq->backlogs);

	i = flow;
	list_for_each_entry_continue_reverse(i, &fq->backlogs,
					     backlogchain)
		if (i->backlog > flow->backlog)
			break;

	list_move(&flow->backlogchain, &i->backlogchain);
}

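/* Classify @skb into a flow, charge it to the flow/tin/fq backlog
 * counters and append it to the flow's queue.  If the global packet
 * limit is exceeded, one packet is dropped from the flow with the
 * largest backlog (the head of fq->backlogs).
 */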
static void fq_tin_enqueue(struct fq *fq,
			   struct fq_tin *tin,
			   struct sk_buff *skb,
			   fq_skb_free_t free_func,
			   fq_flow_get_default_t get_default_func)
{
	struct fq_flow *flow;

	lockdep_assert_held(&fq->lock);

	flow = fq_flow_classify(fq, tin, skb, get_default_func);

	flow->tin = tin;
	flow->backlog += skb->len;
	tin->backlog_bytes += skb->len;
	tin->backlog_packets++;
	fq->backlog++;

	fq_recalc_backlog(fq, tin, flow);

	if (list_empty(&flow->flowchain)) {
		flow->deficit = fq->quantum;
		list_add_tail(&flow->flowchain,
			      &tin->new_flows);
	}

	__skb_queue_tail(&flow->queue, skb);

	if (fq->backlog > fq->limit) {
		flow = list_first_entry_or_null(&fq->backlogs,
						struct fq_flow,
						backlogchain);
		if (!flow)
			return;

		skb = fq_flow_dequeue(fq, flow);
		if (!skb)
			return;

		free_func(fq, flow->tin, flow, skb);

		flow->tin->overlimit++;
		fq->overlimit++;
	}
}

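/* Drop every queued packet on @flow and unlink it from both the DRR
 * rotation and the backlog list, leaving it ready for reuse.
 */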
static void fq_flow_reset(struct fq *fq,
			  struct fq_flow *flow,
			  fq_skb_free_t free_func)
{
	struct sk_buff *skb;

	while ((skb = fq_flow_dequeue(fq, flow)))
		free_func(fq, flow->tin, flow, skb);

	if (!list_empty(&flow->flowchain))
		list_del_init(&flow->flowchain);

	if (!list_empty(&flow->backlogchain))
		list_del_init(&flow->backlogchain);

	flow->tin = NULL;

	WARN_ON_ONCE(flow->backlog);
}

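/* Reset every flow currently linked into @tin's DRR lists; afterwards
 * the tin must account for no backlogged bytes or packets.
 */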
static void fq_tin_reset(struct fq *fq,
			 struct fq_tin *tin,
			 fq_skb_free_t free_func)
{
	struct list_head *head;
	struct fq_flow *flow;

	for (;;) {
		head = &tin->new_flows;
		if (list_empty(head)) {
			head = &tin->old_flows;
			if (list_empty(head))
				break;
		}

		flow = list_first_entry(head, struct fq_flow, flowchain);
		fq_flow_reset(fq, flow, free_func);
	}

	WARN_ON_ONCE(tin->backlog_bytes);
	WARN_ON_ONCE(tin->backlog_packets);
}

static void fq_flow_init(struct fq_flow *flow)
{
	INIT_LIST_HEAD(&flow->flowchain);
	INIT_LIST_HEAD(&flow->backlogchain);
	__skb_queue_head_init(&flow->queue);
}

static void fq_tin_init(struct fq_tin *tin)
{
	INIT_LIST_HEAD(&tin->new_flows);
	INIT_LIST_HEAD(&tin->old_flows);
}

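/* Initialize @fq with @flows_cnt flow slots (at least one), a random
 * hash perturbation, a 300 byte DRR quantum and a global limit of
 * 8192 packets.
 */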
static int fq_init(struct fq *fq, int flows_cnt)
{
	int i;

	memset(fq, 0, sizeof(fq[0]));
	INIT_LIST_HEAD(&fq->backlogs);
	spin_lock_init(&fq->lock);
	fq->flows_cnt = max_t(u32, flows_cnt, 1);
	fq->perturbation = prandom_u32();
	fq->quantum = 300;
	fq->limit = 8192;

	fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
	if (!fq->flows)
		return -ENOMEM;

	for (i = 0; i < fq->flows_cnt; i++)
		fq_flow_init(&fq->flows[i]);

	return 0;
}

static void fq_reset(struct fq *fq,
		     fq_skb_free_t free_func)
{
	for (i = 0; i < fq->flows_cnt; i++)
		fq_flow_reset(fq, &fq->flows[i], free_func);

	kfree(fq->flows);
	fq->flows = NULL;
}

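/*
 * Usage sketch (illustrative only: the callback names below are
 * hypothetical, not part of this header; mac80211 is the in-tree
 * includer).  The includer supplies the per-flow dequeue and free
 * callbacks and must hold fq->lock around the calls:
 *
 *	static struct sk_buff *my_dequeue(struct fq *fq,
 *					  struct fq_tin *tin,
 *					  struct fq_flow *flow)
 *	{
 *		return fq_flow_dequeue(fq, flow);
 *	}
 *
 *	static void my_free(struct fq *fq, struct fq_tin *tin,
 *			    struct fq_flow *flow, struct sk_buff *skb)
 *	{
 *		dev_kfree_skb(skb);
 *	}
 *
 *	spin_lock_bh(&fq->lock);
 *	fq_tin_enqueue(fq, tin, skb, my_free, my_get_default_flow);
 *	skb = fq_tin_dequeue(fq, tin, my_dequeue);
 *	spin_unlock_bh(&fq->lock);
 */
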
#endif