/*
 * net/core/gen_stats.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:  Thomas Graf <tgraf@suug.ch>
 *           Jamal Hadi Salim
 *           Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * See Documentation/networking/gen_stats.txt
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/rtnetlink.h>
#include <linux/gen_stats.h>
#include <net/netlink.h>
#include <net/gen_stats.h>


static inline int
gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
{
	if (nla_put_64bit(d->skb, type, size, buf, padattr))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return -1;
}

/**
 * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @tc_stats_type: TLV type for backward compatibility struct tc_stats TLV
 * @xstats_type: TLV type for backward compatibility xstats TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistic TLVs.
 *
 * The dumping handle is marked to be in backward compatibility mode telling
 * all gnet_stats_copy_XXX() functions to fill a local copy of struct tc_stats.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
			     int xstats_type, spinlock_t *lock,
			     struct gnet_dump *d, int padattr)
	__acquires(lock)
{
	memset(d, 0, sizeof(*d));

	if (type)
		d->tail = (struct nlattr *)skb_tail_pointer(skb);
	d->skb = skb;
	d->compat_tc_stats = tc_stats_type;
	d->compat_xstats = xstats_type;
	d->padattr = padattr;
	if (lock) {
		d->lock = lock;
		spin_lock_bh(lock);
	}
	if (d->tail)
		return gnet_stats_copy(d, type, NULL, 0, padattr);

	return 0;
}
EXPORT_SYMBOL(gnet_stats_start_copy_compat);
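
/*
 * Example (illustrative sketch only, not part of this file): a caller that
 * needs both the newer TCA_STATS2 nest and the legacy TCA_STATS/TCA_XSTATS
 * attributes would typically start a compat-mode dump roughly as follows.
 * The surrounding error label and the stats_lock argument are assumptions
 * made for illustration; consult the actual dump callers for authoritative
 * usage.
 *
 *	struct gnet_dump d;
 *
 *	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
 *					 TCA_XSTATS, &stats_lock, &d,
 *					 TCA_PAD) < 0)
 *		goto nla_put_failure;
 */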

/**
 * gnet_stats_start_copy - start dumping procedure
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistic TLVs.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
		      struct gnet_dump *d, int padattr)
{
	return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d, padattr);
}
EXPORT_SYMBOL(gnet_stats_start_copy);

static void
__gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
			    struct gnet_stats_basic_cpu __percpu *cpu)
{
	int i;

	for_each_possible_cpu(i) {
		struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
		unsigned int start;
		u64 bytes;
		u32 packets;

		do {
			start = u64_stats_fetch_begin_irq(&bcpu->syncp);
			bytes = bcpu->bstats.bytes;
			packets = bcpu->bstats.packets;
		} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));

		bstats->bytes += bytes;
		bstats->packets += packets;
	}
}

void
__gnet_stats_copy_basic(const seqcount_t *running,
			struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_basic_cpu __percpu *cpu,
			struct gnet_stats_basic_packed *b)
{
	unsigned int seq;

	if (cpu) {
		__gnet_stats_copy_basic_cpu(bstats, cpu);
		return;
	}
	do {
		if (running)
			seq = read_seqcount_begin(running);
		bstats->bytes = b->bytes;
		bstats->packets = b->packets;
	} while (running && read_seqcount_retry(running, seq));
}
EXPORT_SYMBOL(__gnet_stats_copy_basic);

/**
 * gnet_stats_copy_basic - copy basic statistics into statistic TLV
 * @running: seqcount_t pointer
 * @d: dumping handle
 * @cpu: copy statistic per cpu
 * @b: basic statistics
 *
 * Appends the basic statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_basic(const seqcount_t *running,
		      struct gnet_dump *d,
		      struct gnet_stats_basic_cpu __percpu *cpu,
		      struct gnet_stats_basic_packed *b)
{
	struct gnet_stats_basic_packed bstats = {0};

	__gnet_stats_copy_basic(running, &bstats, cpu, b);

	if (d->compat_tc_stats) {
		d->tc_stats.bytes = bstats.bytes;
		d->tc_stats.packets = bstats.packets;
	}

	if (d->tail) {
		struct gnet_stats_basic sb;

		memset(&sb, 0, sizeof(sb));
		sb.bytes = bstats.bytes;
		sb.packets = bstats.packets;
		return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb),
				       TCA_STATS_PAD);
	}
	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_basic);
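
/*
 * Example (illustrative sketch only): after starting the dump, a caller
 * passes either a per-CPU statistics pointer or a plain one; the per-CPU
 * path is taken whenever @cpu is non-NULL, and @running may be NULL when
 * no seqcount protects the writer.  The names running, cpu_bstats and
 * bstats below are assumptions made for illustration.
 *
 *	if (gnet_stats_copy_basic(running, &d, cpu_bstats, &bstats) < 0)
 *		goto nla_put_failure;
 */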

/**
 * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
 * @d: dumping handle
 * @b: basic statistics
 * @r: rate estimator statistics
 *
 * Appends the rate estimator statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_rate_est(struct gnet_dump *d,
			 const struct gnet_stats_basic_packed *b,
			 struct gnet_stats_rate_est64 *r)
{
	struct gnet_stats_rate_est est;
	int res;

	if (b && !gen_estimator_active(b, r))
		return 0;

	est.bps = min_t(u64, UINT_MAX, r->bps);
	/* we have some time before reaching 2^32 packets per second */
	est.pps = r->pps;

	if (d->compat_tc_stats) {
		d->tc_stats.bps = est.bps;
		d->tc_stats.pps = est.pps;
	}

	if (d->tail) {
		res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est),
				      TCA_STATS_PAD);
		if (res < 0 || est.bps == r->bps)
			return res;
		/* emit 64bit stats only if needed */
		return gnet_stats_copy(d, TCA_STATS_RATE_EST64, r, sizeof(*r),
				       TCA_STATS_PAD);
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_rate_est);

static void
__gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
			    const struct gnet_stats_queue __percpu *q)
{
	int i;

	for_each_possible_cpu(i) {
		const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);

		qstats->qlen = 0;
		qstats->backlog += qcpu->backlog;
		qstats->drops += qcpu->drops;
		qstats->requeues += qcpu->requeues;
		qstats->overlimits += qcpu->overlimits;
	}
}

static void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
				    const struct gnet_stats_queue __percpu *cpu,
				    const struct gnet_stats_queue *q,
				    __u32 qlen)
{
	if (cpu) {
		__gnet_stats_copy_queue_cpu(qstats, cpu);
	} else {
		qstats->qlen = q->qlen;
		qstats->backlog = q->backlog;
		qstats->drops = q->drops;
		qstats->requeues = q->requeues;
		qstats->overlimits = q->overlimits;
	}

	qstats->qlen = qlen;
}

/**
 * gnet_stats_copy_queue - copy queue statistics into statistics TLV
 * @d: dumping handle
 * @cpu_q: per cpu queue statistics
 * @q: queue statistics
 * @qlen: queue length statistics
 *
 * Appends the queue statistics to the top level TLV created by
 * gnet_stats_start_copy(). Per-CPU queue statistics are used if
 * they are available.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_queue(struct gnet_dump *d,
		      struct gnet_stats_queue __percpu *cpu_q,
		      struct gnet_stats_queue *q, __u32 qlen)
{
	struct gnet_stats_queue qstats = {0};

	__gnet_stats_copy_queue(&qstats, cpu_q, q, qlen);

	if (d->compat_tc_stats) {
		d->tc_stats.drops = qstats.drops;
		d->tc_stats.qlen = qstats.qlen;
		d->tc_stats.backlog = qstats.backlog;
		d->tc_stats.overlimits = qstats.overlimits;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_QUEUE,
				       &qstats, sizeof(qstats),
				       TCA_STATS_PAD);

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_queue);
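
/*
 * Example (illustrative sketch only): queue statistics are copied the same
 * way, with the current queue length passed in explicitly because the
 * per-CPU counters do not carry a meaningful qlen.  The names cpu_qstats,
 * qstats and qlen below are assumptions made for illustration.
 *
 *	if (gnet_stats_copy_queue(&d, cpu_qstats, &qstats, qlen) < 0)
 *		goto nla_put_failure;
 */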

/**
 * gnet_stats_copy_app - copy application specific statistics into statistics TLV
 * @d: dumping handle
 * @st: application specific statistics data
 * @len: length of data
 *
 * Appends the application specific statistics to the top level TLV created by
 * gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
 * handle is in backward compatibility mode.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
{
	if (d->compat_xstats) {
		d->xstats = kmemdup(st, len, GFP_ATOMIC);
		if (!d->xstats)
			goto err_out;
		d->xstats_len = len;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_APP, st, len,
				       TCA_STATS_PAD);

	return 0;

err_out:
	if (d->lock)
		spin_unlock_bh(d->lock);
	d->xstats_len = 0;
	return -1;
}
EXPORT_SYMBOL(gnet_stats_copy_app);

/**
 * gnet_stats_finish_copy - finish dumping procedure
 * @d: dumping handle
 *
 * Corrects the length of the top level TLV to include all TLVs added
 * by gnet_stats_copy_XXX() calls. Adds the backward compatibility TLVs
 * if gnet_stats_start_copy_compat() was used and releases the statistics
 * lock.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_finish_copy(struct gnet_dump *d)
{
	if (d->tail)
		d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail;

	if (d->compat_tc_stats)
		if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
				    sizeof(d->tc_stats), d->padattr) < 0)
			return -1;

	if (d->compat_xstats && d->xstats) {
		if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
				    d->xstats_len, d->padattr) < 0)
			return -1;
	}

	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return 0;
}
EXPORT_SYMBOL(gnet_stats_finish_copy);
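
/*
 * Example (illustrative sketch only, not part of this file): putting the
 * helpers above together, a minimal non-compat dump looks roughly like the
 * following.  The function name example_dump_stats and its parameters are
 * assumptions made for illustration; the helper signatures match the
 * definitions in this file.
 *
 *	static int example_dump_stats(struct sk_buff *skb, spinlock_t *lock,
 *				      struct gnet_stats_basic_packed *bstats,
 *				      struct gnet_stats_queue *qstats,
 *				      u32 qlen, void *xstats, int xstats_len)
 *	{
 *		struct gnet_dump d;
 *
 *		if (gnet_stats_start_copy(skb, TCA_STATS2, lock, &d,
 *					  TCA_PAD) < 0)
 *			return -1;
 *
 *		if (gnet_stats_copy_basic(NULL, &d, NULL, bstats) < 0 ||
 *		    gnet_stats_copy_queue(&d, NULL, qstats, qlen) < 0 ||
 *		    gnet_stats_copy_app(&d, xstats, xstats_len) < 0)
 *			return -1;
 *
 *		return gnet_stats_finish_copy(&d);
 *	}
 */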