xref: /openbmc/linux/net/core/gen_stats.c (revision 4a44a19b)
1 /*
2  * net/core/gen_stats.c
3  *
4  *             This program is free software; you can redistribute it and/or
5  *             modify it under the terms of the GNU General Public License
6  *             as published by the Free Software Foundation; either version
7  *             2 of the License, or (at your option) any later version.
8  *
9  * Authors:  Thomas Graf <tgraf@suug.ch>
10  *           Jamal Hadi Salim
11  *           Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
12  *
13  * See Documentation/networking/gen_stats.txt
14  */
15 
16 #include <linux/types.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/interrupt.h>
20 #include <linux/socket.h>
21 #include <linux/rtnetlink.h>
22 #include <linux/gen_stats.h>
23 #include <net/netlink.h>
24 #include <net/gen_stats.h>
25 
26 
27 static inline int
28 gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
29 {
30 	if (nla_put(d->skb, type, size, buf))
31 		goto nla_put_failure;
32 	return 0;
33 
34 nla_put_failure:
35 	spin_unlock_bh(d->lock);
36 	return -1;
37 }
38 
39 /**
40  * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode
41  * @skb: socket buffer to put statistics TLVs into
42  * @type: TLV type for top level statistic TLV
43  * @tc_stats_type: TLV type for backward compatibility struct tc_stats TLV
44  * @xstats_type: TLV type for backward compatibility xstats TLV
45  * @lock: statistics lock
46  * @d: dumping handle
47  *
48  * Initializes the dumping handle, grabs the statistic lock and appends
49  * an empty TLV header to the socket buffer for use a container for all
50  * other statistic TLVS.
51  *
52  * The dumping handle is marked to be in backward compatibility mode telling
53  * all gnet_stats_copy_XXX() functions to fill a local copy of struct tc_stats.
54  *
55  * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
56  */
57 int
58 gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
59 	int xstats_type, spinlock_t *lock, struct gnet_dump *d)
60 	__acquires(lock)
61 {
62 	memset(d, 0, sizeof(*d));
63 
64 	spin_lock_bh(lock);
65 	d->lock = lock;
66 	if (type)
67 		d->tail = (struct nlattr *)skb_tail_pointer(skb);
68 	d->skb = skb;
69 	d->compat_tc_stats = tc_stats_type;
70 	d->compat_xstats = xstats_type;
71 
72 	if (d->tail)
73 		return gnet_stats_copy(d, type, NULL, 0);
74 
75 	return 0;
76 }
77 EXPORT_SYMBOL(gnet_stats_start_copy_compat);
78 
79 /**
80  * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode
81  * @skb: socket buffer to put statistics TLVs into
82  * @type: TLV type for top level statistic TLV
83  * @lock: statistics lock
84  * @d: dumping handle
85  *
86  * Initializes the dumping handle, grabs the statistic lock and appends
87  * an empty TLV header to the socket buffer for use a container for all
88  * other statistic TLVS.
89  *
90  * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
91  */
92 int
93 gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
94 	struct gnet_dump *d)
95 {
96 	return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d);
97 }
98 EXPORT_SYMBOL(gnet_stats_start_copy);
99 
/* Sum the per-cpu basic counters (bytes/packets) into @bstats.
 *
 * Each CPU's pair is sampled under its u64_stats seqcount so the 64-bit
 * byte counter reads consistently even on 32-bit hosts; the do/while
 * retries a CPU whose writer updated the counters mid-read.
 */
static void
__gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
			    struct gnet_stats_basic_cpu __percpu *cpu)
{
	int i;

	for_each_possible_cpu(i) {
		struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
		unsigned int start;
		u64 bytes;
		u32 packets;

		/* Retry until a consistent snapshot of this CPU is read. */
		do {
			start = u64_stats_fetch_begin_irq(&bcpu->syncp);
			bytes = bcpu->bstats.bytes;
			packets = bcpu->bstats.packets;
		} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));

		bstats->bytes += bytes;
		bstats->packets += packets;
	}
}
122 
123 void
124 __gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
125 			struct gnet_stats_basic_cpu __percpu *cpu,
126 			struct gnet_stats_basic_packed *b)
127 {
128 	if (cpu) {
129 		__gnet_stats_copy_basic_cpu(bstats, cpu);
130 	} else {
131 		bstats->bytes = b->bytes;
132 		bstats->packets = b->packets;
133 	}
134 }
135 EXPORT_SYMBOL(__gnet_stats_copy_basic);
136 
137 /**
138  * gnet_stats_copy_basic - copy basic statistics into statistic TLV
139  * @d: dumping handle
140  * @b: basic statistics
141  *
142  * Appends the basic statistics to the top level TLV created by
143  * gnet_stats_start_copy().
144  *
145  * Returns 0 on success or -1 with the statistic lock released
146  * if the room in the socket buffer was not sufficient.
147  */
148 int
149 gnet_stats_copy_basic(struct gnet_dump *d,
150 		      struct gnet_stats_basic_cpu __percpu *cpu,
151 		      struct gnet_stats_basic_packed *b)
152 {
153 	struct gnet_stats_basic_packed bstats = {0};
154 
155 	__gnet_stats_copy_basic(&bstats, cpu, b);
156 
157 	if (d->compat_tc_stats) {
158 		d->tc_stats.bytes = bstats.bytes;
159 		d->tc_stats.packets = bstats.packets;
160 	}
161 
162 	if (d->tail) {
163 		struct gnet_stats_basic sb;
164 
165 		memset(&sb, 0, sizeof(sb));
166 		sb.bytes = bstats.bytes;
167 		sb.packets = bstats.packets;
168 		return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb));
169 	}
170 	return 0;
171 }
172 EXPORT_SYMBOL(gnet_stats_copy_basic);
173 
/**
 * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
 * @d: dumping handle
 * @b: basic statistics the estimator was attached to, or NULL to skip
 *     the gen_estimator_active() check
 * @r: rate estimator statistics
 *
 * Appends the rate estimator statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_rate_est(struct gnet_dump *d,
			 const struct gnet_stats_basic_packed *b,
			 struct gnet_stats_rate_est64 *r)
{
	struct gnet_stats_rate_est est;
	int res;

	/* No estimator running for @b: nothing to report. */
	if (b && !gen_estimator_active(b, r))
		return 0;

	/* Legacy 32-bit TLV: saturate bps at UINT_MAX rather than wrap. */
	est.bps = min_t(u64, UINT_MAX, r->bps);
	/* we have some time before reaching 2^32 packets per second */
	est.pps = r->pps;

	if (d->compat_tc_stats) {
		d->tc_stats.bps = est.bps;
		d->tc_stats.pps = est.pps;
	}

	if (d->tail) {
		/* Always emit the legacy TLV for old userspace ... */
		res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est));
		if (res < 0 || est.bps == r->bps)
			return res;
		/* emit 64bit stats only if needed */
		return gnet_stats_copy(d, TCA_STATS_RATE_EST64, r, sizeof(*r));
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_rate_est);
217 
218 static void
219 __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
220 			    const struct gnet_stats_queue __percpu *q)
221 {
222 	int i;
223 
224 	for_each_possible_cpu(i) {
225 		const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
226 
227 		qstats->qlen = 0;
228 		qstats->backlog += qcpu->backlog;
229 		qstats->drops += qcpu->drops;
230 		qstats->requeues += qcpu->requeues;
231 		qstats->overlimits += qcpu->overlimits;
232 	}
233 }
234 
235 static void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
236 				    const struct gnet_stats_queue __percpu *cpu,
237 				    const struct gnet_stats_queue *q,
238 				    __u32 qlen)
239 {
240 	if (cpu) {
241 		__gnet_stats_copy_queue_cpu(qstats, cpu);
242 	} else {
243 		qstats->qlen = q->qlen;
244 		qstats->backlog = q->backlog;
245 		qstats->drops = q->drops;
246 		qstats->requeues = q->requeues;
247 		qstats->overlimits = q->overlimits;
248 	}
249 
250 	qstats->qlen = qlen;
251 }
252 
253 /**
254  * gnet_stats_copy_queue - copy queue statistics into statistics TLV
255  * @d: dumping handle
256  * @cpu_q: per cpu queue statistics
257  * @q: queue statistics
258  * @qlen: queue length statistics
259  *
260  * Appends the queue statistics to the top level TLV created by
261  * gnet_stats_start_copy(). Using per cpu queue statistics if
262  * they are available.
263  *
264  * Returns 0 on success or -1 with the statistic lock released
265  * if the room in the socket buffer was not sufficient.
266  */
267 int
268 gnet_stats_copy_queue(struct gnet_dump *d,
269 		      struct gnet_stats_queue __percpu *cpu_q,
270 		      struct gnet_stats_queue *q, __u32 qlen)
271 {
272 	struct gnet_stats_queue qstats = {0};
273 
274 	__gnet_stats_copy_queue(&qstats, cpu_q, q, qlen);
275 
276 	if (d->compat_tc_stats) {
277 		d->tc_stats.drops = qstats.drops;
278 		d->tc_stats.qlen = qstats.qlen;
279 		d->tc_stats.backlog = qstats.backlog;
280 		d->tc_stats.overlimits = qstats.overlimits;
281 	}
282 
283 	if (d->tail)
284 		return gnet_stats_copy(d, TCA_STATS_QUEUE,
285 				       &qstats, sizeof(qstats));
286 
287 	return 0;
288 }
289 EXPORT_SYMBOL(gnet_stats_copy_queue);
290 
291 /**
292  * gnet_stats_copy_app - copy application specific statistics into statistics TLV
293  * @d: dumping handle
294  * @st: application specific statistics data
295  * @len: length of data
296  *
297  * Appends the application specific statistics to the top level TLV created by
298  * gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
299  * handle is in backward compatibility mode.
300  *
301  * Returns 0 on success or -1 with the statistic lock released
302  * if the room in the socket buffer was not sufficient.
303  */
304 int
305 gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
306 {
307 	if (d->compat_xstats) {
308 		d->xstats = st;
309 		d->xstats_len = len;
310 	}
311 
312 	if (d->tail)
313 		return gnet_stats_copy(d, TCA_STATS_APP, st, len);
314 
315 	return 0;
316 }
317 EXPORT_SYMBOL(gnet_stats_copy_app);
318 
/**
 * gnet_stats_finish_copy - finish dumping procedure
 * @d: dumping handle
 *
 * Corrects the length of the top level TLV to include all TLVs added
 * by gnet_stats_copy_XXX() calls. Adds the backward compatibility TLVs
 * if gnet_stats_start_copy_compat() was used and releases the statistics
 * lock.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_finish_copy(struct gnet_dump *d)
{
	/* Back-patch the container TLV header reserved in start_copy with
	 * the total length of everything appended since.
	 */
	if (d->tail)
		d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail;

	/* Compat TLVs go outside the container, after its length is fixed.
	 * On failure gnet_stats_copy() has already dropped the lock.
	 */
	if (d->compat_tc_stats)
		if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
			sizeof(d->tc_stats)) < 0)
			return -1;

	/* Emit the xstats buffer remembered by gnet_stats_copy_app(). */
	if (d->compat_xstats && d->xstats) {
		if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
			d->xstats_len) < 0)
			return -1;
	}

	spin_unlock_bh(d->lock);
	return 0;
}
EXPORT_SYMBOL(gnet_stats_finish_copy);
352