// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/core/gen_stats.c
 *
 * Authors:  Thomas Graf <tgraf@suug.ch>
 *           Jamal Hadi Salim
 *           Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * See Documentation/networking/gen_stats.rst
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/rtnetlink.h>
#include <linux/gen_stats.h>
#include <net/netlink.h>
#include <net/gen_stats.h>
#include <net/sch_generic.h>

static inline int
gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
{
        if (nla_put_64bit(d->skb, type, size, buf, padattr))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        if (d->lock)
                spin_unlock_bh(d->lock);
        kfree(d->xstats);
        d->xstats = NULL;
        d->xstats_len = 0;
        return -1;
}

/**
 * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @tc_stats_type: TLV type for backward compatibility struct tc_stats TLV
 * @xstats_type: TLV type for backward compatibility xstats TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistic TLVs.
 *
 * The dumping handle is marked to be in backward compatibility mode telling
 * all gnet_stats_copy_XXX() functions to fill a local copy of struct tc_stats.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
                             int xstats_type, spinlock_t *lock,
                             struct gnet_dump *d, int padattr)
        __acquires(lock)
{
        memset(d, 0, sizeof(*d));

        if (type)
                d->tail = (struct nlattr *)skb_tail_pointer(skb);
        d->skb = skb;
        d->compat_tc_stats = tc_stats_type;
        d->compat_xstats = xstats_type;
        d->padattr = padattr;
        if (lock) {
                d->lock = lock;
                spin_lock_bh(lock);
        }
        if (d->tail) {
                int ret = gnet_stats_copy(d, type, NULL, 0, padattr);

                /* The initial attribute added in gnet_stats_copy() may be
                 * preceded by a padding attribute, in which case d->tail will
                 * end up pointing at the padding instead of the real attribute.
                 * Fix this so gnet_stats_finish_copy() adjusts the length of
                 * the right attribute.
                 */
                if (ret == 0 && d->tail->nla_type == padattr)
                        d->tail = (struct nlattr *)((char *)d->tail +
                                                    NLA_ALIGN(d->tail->nla_len));
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL(gnet_stats_start_copy_compat);
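
/*
 * Illustrative sketch (not part of this file): a qdisc dump path typically
 * strings the helpers in this file together as below. The qdisc pointer "q",
 * the "qlen" value and the per-CPU pointers are assumptions for illustration;
 * compare tc_fill_qdisc() in net/sched/sch_api.c for a real caller.
 *
 *	struct gnet_dump d;
 *
 *	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
 *					 TCA_XSTATS, NULL, &d, TCA_PAD) < 0)
 *		goto nla_put_failure;
 *
 *	if (gnet_stats_copy_basic(&d, q->cpu_bstats, &q->bstats, true) < 0 ||
 *	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
 *	    gnet_stats_copy_queue(&d, q->cpu_qstats, &q->qstats, qlen) < 0)
 *		goto nla_put_failure;
 *
 *	if (gnet_stats_finish_copy(&d) < 0)
 *		goto nla_put_failure;
 */
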
/**
 * gnet_stats_start_copy - start dumping procedure
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistic TLVs.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
                      struct gnet_dump *d, int padattr)
{
        return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d, padattr);
}
EXPORT_SYMBOL(gnet_stats_start_copy);

/* Must not be inlined, due to u64_stats seqcount_t lockdep key */
void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b)
{
        u64_stats_set(&b->bytes, 0);
        u64_stats_set(&b->packets, 0);
        u64_stats_init(&b->syncp);
}
EXPORT_SYMBOL(gnet_stats_basic_sync_init);

static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_sync *bstats,
                                     struct gnet_stats_basic_sync __percpu *cpu)
{
        u64 t_bytes = 0, t_packets = 0;
        int i;

        for_each_possible_cpu(i) {
                struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i);
                unsigned int start;
                u64 bytes, packets;

                do {
                        start = u64_stats_fetch_begin_irq(&bcpu->syncp);
                        bytes = u64_stats_read(&bcpu->bytes);
                        packets = u64_stats_read(&bcpu->packets);
                } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));

                t_bytes += bytes;
                t_packets += packets;
        }
        _bstats_update(bstats, t_bytes, t_packets);
}

void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
                          struct gnet_stats_basic_sync __percpu *cpu,
                          struct gnet_stats_basic_sync *b, bool running)
{
        unsigned int start;
        u64 bytes = 0;
        u64 packets = 0;

        WARN_ON_ONCE((cpu || running) && !in_task());

        if (cpu) {
                gnet_stats_add_basic_cpu(bstats, cpu);
                return;
        }
        do {
                if (running)
                        start = u64_stats_fetch_begin_irq(&b->syncp);
                bytes = u64_stats_read(&b->bytes);
                packets = u64_stats_read(&b->packets);
        } while (running && u64_stats_fetch_retry_irq(&b->syncp, start));

        _bstats_update(bstats, bytes, packets);
}
EXPORT_SYMBOL(gnet_stats_add_basic);

static int
___gnet_stats_copy_basic(struct gnet_dump *d,
                         struct gnet_stats_basic_sync __percpu *cpu,
                         struct gnet_stats_basic_sync *b,
                         int type, bool running)
{
        struct gnet_stats_basic_sync bstats;
        u64 bstats_bytes, bstats_packets;

        gnet_stats_basic_sync_init(&bstats);
        gnet_stats_add_basic(&bstats, cpu, b, running);

        bstats_bytes = u64_stats_read(&bstats.bytes);
        bstats_packets = u64_stats_read(&bstats.packets);

        if (d->compat_tc_stats && type == TCA_STATS_BASIC) {
                d->tc_stats.bytes = bstats_bytes;
                d->tc_stats.packets = bstats_packets;
        }

        if (d->tail) {
                struct gnet_stats_basic sb;
                int res;

                memset(&sb, 0, sizeof(sb));
                sb.bytes = bstats_bytes;
                sb.packets = bstats_packets;
                res = gnet_stats_copy(d, type, &sb, sizeof(sb), TCA_STATS_PAD);
                if (res < 0 || sb.packets == bstats_packets)
                        return res;
                /* emit 64bit stats only if needed */
                return gnet_stats_copy(d, TCA_STATS_PKT64, &bstats_packets,
                                       sizeof(bstats_packets), TCA_STATS_PAD);
        }
        return 0;
}
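
/*
 * Illustrative sketch (not part of this file): the writer side pairs with the
 * readers above via the u64_stats syncp. A qdisc fast path updates a
 * gnet_stats_basic_sync (initialized once with gnet_stats_basic_sync_init())
 * through the helpers in include/net/sch_generic.h; "sch" and "skb" are
 * assumptions for illustration:
 *
 *	bstats_update(&sch->bstats, skb);
 *	// or, when the qdisc keeps per-CPU stats:
 *	bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
 *
 * gnet_stats_add_basic() above then folds either representation into a single
 * total before it is copied into the netlink message.
 */
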
/**
 * gnet_stats_copy_basic - copy basic statistics into statistic TLV
 * @d: dumping handle
 * @cpu: copy statistic per cpu
 * @b: basic statistics
 * @running: true if @b represents a running qdisc, thus @b's
 *           internal values might change during basic reads.
 *           Only used if @cpu is NULL
 *
 * Context: task; must not be run from IRQ or BH contexts
 *
 * Appends the basic statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_basic(struct gnet_dump *d,
                      struct gnet_stats_basic_sync __percpu *cpu,
                      struct gnet_stats_basic_sync *b,
                      bool running)
{
        return ___gnet_stats_copy_basic(d, cpu, b, TCA_STATS_BASIC, running);
}
EXPORT_SYMBOL(gnet_stats_copy_basic);

/**
 * gnet_stats_copy_basic_hw - copy basic hw statistics into statistic TLV
 * @d: dumping handle
 * @cpu: copy statistic per cpu
 * @b: basic statistics
 * @running: true if @b represents a running qdisc, thus @b's
 *           internal values might change during basic reads.
 *           Only used if @cpu is NULL
 *
 * Context: task; must not be run from IRQ or BH contexts
 *
 * Appends the basic statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_basic_hw(struct gnet_dump *d,
                         struct gnet_stats_basic_sync __percpu *cpu,
                         struct gnet_stats_basic_sync *b,
                         bool running)
{
        return ___gnet_stats_copy_basic(d, cpu, b, TCA_STATS_BASIC_HW, running);
}
EXPORT_SYMBOL(gnet_stats_copy_basic_hw);

/**
 * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
 * @d: dumping handle
 * @rate_est: rate estimator
 *
 * Appends the rate estimator statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_rate_est(struct gnet_dump *d,
                         struct net_rate_estimator __rcu **rate_est)
{
        struct gnet_stats_rate_est64 sample;
        struct gnet_stats_rate_est est;
        int res;

        if (!gen_estimator_read(rate_est, &sample))
                return 0;
        est.bps = min_t(u64, UINT_MAX, sample.bps);
        /* we have some time before reaching 2^32 packets per second */
        est.pps = sample.pps;

        if (d->compat_tc_stats) {
                d->tc_stats.bps = est.bps;
                d->tc_stats.pps = est.pps;
        }

        if (d->tail) {
                res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est),
                                      TCA_STATS_PAD);
                if (res < 0 || est.bps == sample.bps)
                        return res;
                /* emit 64bit stats only if needed */
                return gnet_stats_copy(d, TCA_STATS_RATE_EST64, &sample,
                                       sizeof(sample), TCA_STATS_PAD);
        }

        return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_rate_est);

static void gnet_stats_add_queue_cpu(struct gnet_stats_queue *qstats,
                                     const struct gnet_stats_queue __percpu *q)
{
        int i;

        for_each_possible_cpu(i) {
                const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);

                qstats->qlen += qcpu->qlen;
                qstats->backlog += qcpu->backlog;
                qstats->drops += qcpu->drops;
                qstats->requeues += qcpu->requeues;
                qstats->overlimits += qcpu->overlimits;
        }
}

void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
                          const struct gnet_stats_queue __percpu *cpu,
                          const struct gnet_stats_queue *q)
{
        if (cpu) {
                gnet_stats_add_queue_cpu(qstats, cpu);
        } else {
                qstats->qlen += q->qlen;
                qstats->backlog += q->backlog;
                qstats->drops += q->drops;
                qstats->requeues += q->requeues;
                qstats->overlimits += q->overlimits;
        }
}
EXPORT_SYMBOL(gnet_stats_add_queue);
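
/*
 * Illustrative sketch (not part of this file): collecting the queue length
 * before dumping. The qdisc "q" is an assumption for illustration; callers
 * such as tc_fill_qdisc() use qdisc_qlen_sum() from
 * include/net/sch_generic.h, which sums per-CPU qlen when the qdisc keeps
 * per-CPU queue statistics:
 *
 *	__u32 qlen = qdisc_qlen_sum(q);
 *
 *	if (gnet_stats_copy_queue(&d, q->cpu_qstats, &q->qstats, qlen) < 0)
 *		goto nla_put_failure;
 */
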
/**
 * gnet_stats_copy_queue - copy queue statistics into statistics TLV
 * @d: dumping handle
 * @cpu_q: per cpu queue statistics
 * @q: queue statistics
 * @qlen: queue length statistics
 *
 * Appends the queue statistics to the top level TLV created by
 * gnet_stats_start_copy(). Per-CPU queue statistics are used if
 * they are available.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_queue(struct gnet_dump *d,
                      struct gnet_stats_queue __percpu *cpu_q,
                      struct gnet_stats_queue *q, __u32 qlen)
{
        struct gnet_stats_queue qstats = {0};

        gnet_stats_add_queue(&qstats, cpu_q, q);
        qstats.qlen = qlen;

        if (d->compat_tc_stats) {
                d->tc_stats.drops = qstats.drops;
                d->tc_stats.qlen = qstats.qlen;
                d->tc_stats.backlog = qstats.backlog;
                d->tc_stats.overlimits = qstats.overlimits;
        }

        if (d->tail)
                return gnet_stats_copy(d, TCA_STATS_QUEUE,
                                       &qstats, sizeof(qstats),
                                       TCA_STATS_PAD);

        return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_queue);

/**
 * gnet_stats_copy_app - copy application specific statistics into statistics TLV
 * @d: dumping handle
 * @st: application specific statistics data
 * @len: length of data
 *
 * Appends the application specific statistics to the top level TLV created by
 * gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
 * handle is in backward compatibility mode.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
{
        if (d->compat_xstats) {
                d->xstats = kmemdup(st, len, GFP_ATOMIC);
                if (!d->xstats)
                        goto err_out;
                d->xstats_len = len;
        }

        if (d->tail)
                return gnet_stats_copy(d, TCA_STATS_APP, st, len,
                                       TCA_STATS_PAD);

        return 0;

err_out:
        if (d->lock)
                spin_unlock_bh(d->lock);
        d->xstats_len = 0;
        return -1;
}
EXPORT_SYMBOL(gnet_stats_copy_app);

/**
 * gnet_stats_finish_copy - finish dumping procedure
 * @d: dumping handle
 *
 * Corrects the length of the top level TLV to include all TLVs added
 * by gnet_stats_copy_XXX() calls. Adds the backward compatibility TLVs
 * if gnet_stats_start_copy_compat() was used and releases the statistics
 * lock.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_finish_copy(struct gnet_dump *d)
{
        if (d->tail)
                d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail;

        if (d->compat_tc_stats)
                if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
                                    sizeof(d->tc_stats), d->padattr) < 0)
                        return -1;

        if (d->compat_xstats && d->xstats) {
                if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
                                    d->xstats_len, d->padattr) < 0)
                        return -1;
        }

        if (d->lock)
                spin_unlock_bh(d->lock);
        kfree(d->xstats);
        d->xstats = NULL;
        d->xstats_len = 0;
        return 0;
}
EXPORT_SYMBOL(gnet_stats_finish_copy);
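
/*
 * Illustrative sketch (not part of this file): a classful qdisc's
 * ->dump_stats() callback typically reports its private extended statistics
 * through gnet_stats_copy_app(); the foo_* names and the cl->qlen/cl->xstats
 * members are assumptions for illustration (compare htb_dump_class_stats()
 * in net/sched/sch_htb.c). The caller that set up the gnet_dump later closes
 * the container via gnet_stats_finish_copy().
 *
 *	static int foo_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 *					struct gnet_dump *d)
 *	{
 *		struct foo_class *cl = (struct foo_class *)arg;
 *
 *		if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
 *		    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qlen) < 0)
 *			return -1;
 *
 *		return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
 *	}
 */
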