xref: /openbmc/linux/net/core/gen_stats.c (revision a48c7709)
/*
 * net/core/gen_stats.c
 *
 *             This program is free software; you can redistribute it and/or
 *             modify it under the terms of the GNU General Public License
 *             as published by the Free Software Foundation; either version
 *             2 of the License, or (at your option) any later version.
 *
 * Authors:  Thomas Graf <tgraf@suug.ch>
 *           Jamal Hadi Salim
 *           Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * See Documentation/networking/gen_stats.txt
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/rtnetlink.h>
#include <linux/gen_stats.h>
#include <net/netlink.h>
#include <net/gen_stats.h>


/* Append one TLV; on failure release the lock and any compat xstats copy. */
static inline int
gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
{
	if (nla_put_64bit(d->skb, type, size, buf, padattr))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return -1;
}

/**
 * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @tc_stats_type: TLV type for backward compatibility struct tc_stats TLV
 * @xstats_type: TLV type for backward compatibility xstats TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistic TLVs.
 *
 * The dumping handle is marked to be in backward compatibility mode telling
 * all gnet_stats_copy_XXX() functions to fill a local copy of struct tc_stats.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
			     int xstats_type, spinlock_t *lock,
			     struct gnet_dump *d, int padattr)
	__acquires(lock)
{
	memset(d, 0, sizeof(*d));

	if (type)
		d->tail = (struct nlattr *)skb_tail_pointer(skb);
	d->skb = skb;
	d->compat_tc_stats = tc_stats_type;
	d->compat_xstats = xstats_type;
	d->padattr = padattr;
	if (lock) {
		d->lock = lock;
		spin_lock_bh(lock);
	}
	if (d->tail)
		return gnet_stats_copy(d, type, NULL, 0, padattr);

	return 0;
}
EXPORT_SYMBOL(gnet_stats_start_copy_compat);
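
/*
 * Example: roughly how a qdisc dump starts a compat-mode copy (a sketch
 * modeled on tc_fill_qdisc() in net/sched/sch_api.c; exact arguments vary
 * between kernel versions). TCA_STATS2 becomes the container TLV, and
 * TCA_STATS/TCA_XSTATS are the backward compatibility TLVs that
 * gnet_stats_finish_copy() will emit.
 */
#if 0	/* illustrative sketch, not compiled */
	struct gnet_dump d;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS, NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;
#endif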

/**
 * gnet_stats_start_copy - start dumping procedure
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistic TLVs.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
		      struct gnet_dump *d, int padattr)
{
	return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d, padattr);
}
EXPORT_SYMBOL(gnet_stats_start_copy);

static void
__gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
			    struct gnet_stats_basic_cpu __percpu *cpu)
{
	int i;

	for_each_possible_cpu(i) {
		struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
		unsigned int start;
		u64 bytes;
		u32 packets;

		/* snapshot this cpu's counters consistently via its
		 * u64_stats seqcount; retry if an update raced with us
		 */
		do {
			start = u64_stats_fetch_begin_irq(&bcpu->syncp);
			bytes = bcpu->bstats.bytes;
			packets = bcpu->bstats.packets;
		} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));

		bstats->bytes += bytes;
		bstats->packets += packets;
	}
}

void
__gnet_stats_copy_basic(const seqcount_t *running,
			struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_basic_cpu __percpu *cpu,
			struct gnet_stats_basic_packed *b)
{
	unsigned int seq;

	if (cpu) {
		__gnet_stats_copy_basic_cpu(bstats, cpu);
		return;
	}
	/* snapshot the serial counters under @running, if the caller
	 * provided the seqcount that writers take
	 */
	do {
		if (running)
			seq = read_seqcount_begin(running);
		bstats->bytes = b->bytes;
		bstats->packets = b->packets;
	} while (running && read_seqcount_retry(running, seq));
}
EXPORT_SYMBOL(__gnet_stats_copy_basic);
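
/*
 * Example: snapshotting basic counters into a local struct. A sketch only;
 * the q->cpu_bstats/q->bstats names follow the usual net/sched layout and
 * stand in for whatever counters the caller maintains, and "running" is
 * assumed to be the seqcount its writers take.
 */
#if 0	/* illustrative sketch, not compiled */
	struct gnet_stats_basic_packed bstats = {0};

	if (q->cpu_bstats)	/* per-cpu counters: @running is ignored */
		__gnet_stats_copy_basic(NULL, &bstats, q->cpu_bstats, NULL);
	else			/* serial counters, guarded by @running */
		__gnet_stats_copy_basic(running, &bstats, NULL, &q->bstats);
#endif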

/**
 * gnet_stats_copy_basic - copy basic statistics into statistic TLV
 * @running: seqcount_t pointer protecting @b, may be NULL
 * @d: dumping handle
 * @cpu: per cpu statistics, used instead of @b if non-NULL
 * @b: basic statistics
 *
 * Appends the basic statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_basic(const seqcount_t *running,
		      struct gnet_dump *d,
		      struct gnet_stats_basic_cpu __percpu *cpu,
		      struct gnet_stats_basic_packed *b)
{
	struct gnet_stats_basic_packed bstats = {0};

	__gnet_stats_copy_basic(running, &bstats, cpu, b);

	if (d->compat_tc_stats) {
		d->tc_stats.bytes = bstats.bytes;
		d->tc_stats.packets = bstats.packets;
	}

	if (d->tail) {
		struct gnet_stats_basic sb;

		memset(&sb, 0, sizeof(sb));
		sb.bytes = bstats.bytes;
		sb.packets = bstats.packets;
		return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb),
				       TCA_STATS_PAD);
	}
	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_basic);

/**
 * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
 * @d: dumping handle
 * @rate_est: rate estimator
 *
 * Appends the rate estimator statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_rate_est(struct gnet_dump *d,
			 struct net_rate_estimator __rcu **rate_est)
{
	struct gnet_stats_rate_est64 sample;
	struct gnet_stats_rate_est est;
	int res;

	if (!gen_estimator_read(rate_est, &sample))
		return 0;
	est.bps = min_t(u64, UINT_MAX, sample.bps);
	/* we have some time before reaching 2^32 packets per second */
	est.pps = sample.pps;

	if (d->compat_tc_stats) {
		d->tc_stats.bps = est.bps;
		d->tc_stats.pps = est.pps;
	}

	if (d->tail) {
		res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est),
				      TCA_STATS_PAD);
		if (res < 0 || est.bps == sample.bps)
			return res;
		/* emit 64bit stats only if needed */
		return gnet_stats_copy(d, TCA_STATS_RATE_EST64, &sample,
				       sizeof(sample), TCA_STATS_PAD);
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_rate_est);

static void
__gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
			    const struct gnet_stats_queue __percpu *q)
{
	int i;

	for_each_possible_cpu(i) {
		const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);

		/* qlen is not maintained per cpu; the caller supplies the
		 * total via the qlen argument of __gnet_stats_copy_queue()
		 */
		qstats->qlen = 0;
		qstats->backlog += qcpu->backlog;
		qstats->drops += qcpu->drops;
		qstats->requeues += qcpu->requeues;
		qstats->overlimits += qcpu->overlimits;
	}
}

void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
			     const struct gnet_stats_queue __percpu *cpu,
			     const struct gnet_stats_queue *q,
			     __u32 qlen)
{
	if (cpu) {
		__gnet_stats_copy_queue_cpu(qstats, cpu);
	} else {
		qstats->qlen = q->qlen;
		qstats->backlog = q->backlog;
		qstats->drops = q->drops;
		qstats->requeues = q->requeues;
		qstats->overlimits = q->overlimits;
	}

	/* in both cases the externally supplied qlen wins */
	qstats->qlen = qlen;
}
EXPORT_SYMBOL(__gnet_stats_copy_queue);
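
/*
 * Example: merging queue counters. The merged qlen always comes from the
 * @qlen argument since qlen is not tracked per cpu. A sketch assuming the
 * usual struct Qdisc fields (sch->cpu_qstats, sch->qstats, sch->q.qlen).
 */
#if 0	/* illustrative sketch, not compiled */
	struct gnet_stats_queue qstats = {0};

	__gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats,
				sch->q.qlen);
#endif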

/**
 * gnet_stats_copy_queue - copy queue statistics into statistics TLV
 * @d: dumping handle
 * @cpu_q: per cpu queue statistics
 * @q: queue statistics
 * @qlen: queue length statistics
 *
 * Appends the queue statistics to the top level TLV created by
 * gnet_stats_start_copy(). Per cpu queue statistics are used if
 * they are available.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_queue(struct gnet_dump *d,
		      struct gnet_stats_queue __percpu *cpu_q,
		      struct gnet_stats_queue *q, __u32 qlen)
{
	struct gnet_stats_queue qstats = {0};

	__gnet_stats_copy_queue(&qstats, cpu_q, q, qlen);

	if (d->compat_tc_stats) {
		d->tc_stats.drops = qstats.drops;
		d->tc_stats.qlen = qstats.qlen;
		d->tc_stats.backlog = qstats.backlog;
		d->tc_stats.overlimits = qstats.overlimits;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_QUEUE,
				       &qstats, sizeof(qstats),
				       TCA_STATS_PAD);

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_queue);

/**
 * gnet_stats_copy_app - copy application specific statistics into statistics TLV
 * @d: dumping handle
 * @st: application specific statistics data
 * @len: length of data
 *
 * Appends the application specific statistics to the top level TLV created by
 * gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
 * handle is in backward compatibility mode.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
{
	if (d->compat_xstats) {
		d->xstats = kmemdup(st, len, GFP_ATOMIC);
		if (!d->xstats)
			goto err_out;
		d->xstats_len = len;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_APP, st, len,
				       TCA_STATS_PAD);

	return 0;

err_out:
	if (d->lock)
		spin_unlock_bh(d->lock);
	d->xstats_len = 0;
	return -1;
}
EXPORT_SYMBOL(gnet_stats_copy_app);
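
/*
 * Example: a qdisc's ->dump_stats() callback typically exports its private
 * counters this way (a sketch; struct tc_foo_xstats and q->my_drops are
 * hypothetical stand-ins for a real xstats layout such as
 * struct tc_red_xstats).
 */
#if 0	/* illustrative sketch, not compiled */
	struct tc_foo_xstats st = {
		.drops = q->my_drops,	/* hypothetical counter */
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
#endif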

/**
 * gnet_stats_finish_copy - finish dumping procedure
 * @d: dumping handle
 *
 * Corrects the length of the top level TLV to include all TLVs added
 * by gnet_stats_copy_XXX() calls. Adds the backward compatibility TLVs
 * if gnet_stats_start_copy_compat() was used and releases the statistics
 * lock.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_finish_copy(struct gnet_dump *d)
{
	if (d->tail)
		d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail;

	if (d->compat_tc_stats)
		if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
				    sizeof(d->tc_stats), d->padattr) < 0)
			return -1;

	if (d->compat_xstats && d->xstats) {
		if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
				    d->xstats_len, d->padattr) < 0)
			return -1;
	}

	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return 0;
}
EXPORT_SYMBOL(gnet_stats_finish_copy);
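
/*
 * Example: the full dump sequence these helpers are built around, as a
 * qdisc dump would issue it (a sketch modeled on tc_fill_qdisc(); error
 * handling is abbreviated and the q-> field names are assumptions).
 */
#if 0	/* illustrative sketch, not compiled */
	struct gnet_dump d;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS, NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q), &d,
				  q->cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, q->cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;
#endif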