/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);	\
	else					\
		printk(x);		\
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
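
/*
 * Worked example (editor's illustration, not from the original source):
 * for nsec = 3000500000, nsec_high() returns 3000 and nsec_low() returns
 * 500000, so a "%Ld.%06ld" format fed with SPLIT_NS(3000500000ULL)
 * prints "3000.500000" - nanoseconds rendered as milliseconds with six
 * fractional digits. do_div() only divides unsigned 64-bit values, which
 * is why both helpers negate a negative input before dividing.
 */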

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	if (!se) {
		struct sched_avg *avg = &cpu_rq(cpu)->avg;
		P(avg->runnable_avg_sum);
		P(avg->avg_period);
		return;
	}

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->statistics.wait_start);
	PN(se->statistics.sleep_start);
	PN(se->statistics.block_start);
	PN(se->statistics.sleep_max);
	PN(se->statistics.block_max);
	PN(se->statistics.exec_max);
	PN(se->statistics.slice_max);
	PN(se->statistics.wait_max);
	PN(se->statistics.wait_sum);
	P(se->statistics.wait_count);
#endif
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.runnable_avg_sum);
	P(se->avg.running_avg_sum);
	P(se->avg.avg_period);
	P(se->avg.load_avg_contrib);
	P(se->avg.utilization_avg_contrib);
	P(se->avg.decay_count);
#endif
#undef PN
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
}
#endif
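
/*
 * Note (editor's observation, based on the call sites in this file):
 * group_path is a single shared static buffer, so task_group_path() is
 * only safe because all of its callers ultimately run under
 * sched_debug_lock, which print_cpu() takes around the per-rq dumps.
 */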

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);
#ifdef CONFIG_SCHEDSTATS
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(p->se.vruntime),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#else
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		0LL, 0L,
		SPLIT_NS(p->se.sum_exec_runtime),
		0LL, 0L);
#endif
#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d", task_node(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "blocked_load_avg",
			cfs_rq->blocked_load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "utilization_load_avg",
			cfs_rq->utilization_load_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_contrib",
			cfs_rq->tg_load_contrib);
	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
			cfs_rq->tg_runnable_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
	SEQ_printf(m, "  .%-30s: %d\n", "tg->runnable_avg",
			atomic_read(&cfs_rq->tg->runnable_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
			cfs_rq->tg->cfs_bandwidth.timer_active);
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	P(rt_nr_running);
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);
	SEQ_printf(m, "  .%-30s: %ld\n", "dl_nr_running", dl_rq->dl_nr_running);
}

extern __read_mostly int sched_clock_running;

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);

	P(yld_count);

	P(sched_count);
	P(sched_goidle);
#ifdef CONFIG_SMP
	P64(avg_idle);
	P64(max_idle_balance_cost);
#endif

	P(ttwu_count);
	P(ttwu_local);

#undef P
#undef P64
#endif
	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}
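
/*
 * Worked example (editor's illustration, assuming CPUs 0 and 1 are
 * online): *offset == 0 yields the header token (void *)1; *offset == 1
 * yields (void *)2 for cpu 0; *offset == 2 yields (void *)3 for cpu 1;
 * the following call runs off cpu_online_mask and returns NULL, which
 * ends the sequence. sched_debug_show() undoes the encoding with
 * cpu = (v - 2), so the header token decodes to -1.
 */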

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start = sched_debug_start,
	.next = sched_debug_next,
	.stop = sched_debug_stop,
	.show = sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
	seq_release(inode, file);

	return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	ret = seq_open(filp, &sched_debug_sops);

	return ret;
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= sched_debug_release,
};

static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;
	int node, i;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	SEQ_printf(m, "numa_migrations, %ld\n", xchg(&p->numa_pages_migrated, 0));

	for_each_online_node(node) {
		for (i = 0; i < 2; i++) {
			unsigned long nr_faults = -1;
			int cpu_current, home_node;

			if (p->numa_faults)
				nr_faults = p->numa_faults[2*node + i];

			cpu_current = !i ? (task_node(p) == node) :
				      (pol && node_isset(node, pol->v.nodes));

			home_node = (p->numa_preferred_nid == node);

			SEQ_printf(m, "numa_faults_memory, %d, %d, %d, %d, %ld\n",
				i, node, cpu_current, home_node, nr_faults);
		}
	}

	mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
	PN(se.statistics.wait_start);
	PN(se.statistics.sleep_start);
	PN(se.statistics.block_start);
	PN(se.statistics.sleep_max);
	PN(se.statistics.block_max);
	PN(se.statistics.exec_max);
	PN(se.statistics.slice_max);
	PN(se.statistics.wait_max);
	PN(se.statistics.wait_sum);
	P(se.statistics.wait_count);
	PN(se.statistics.iowait_sum);
	P(se.statistics.iowait_count);
	P(se.nr_migrations);
	P(se.statistics.nr_migrations_cold);
	P(se.statistics.nr_failed_migrations_affine);
	P(se.statistics.nr_failed_migrations_running);
	P(se.statistics.nr_failed_migrations_hot);
	P(se.statistics.nr_forced_migrations);
	P(se.statistics.nr_wakeups);
	P(se.statistics.nr_wakeups_sync);
	P(se.statistics.nr_wakeups_migrate);
	P(se.statistics.nr_wakeups_local);
	P(se.statistics.nr_wakeups_remote);
	P(se.statistics.nr_wakeups_affine);
	P(se.statistics.nr_wakeups_affine_attempts);
	P(se.statistics.nr_wakeups_passive);
	P(se.statistics.nr_wakeups_idle);

	{
		u64 avg_atom, avg_per_cpu;

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.runnable_avg_sum);
	P(se.avg.running_avg_sum);
	P(se.avg.avg_period);
	P(se.avg.load_avg_contrib);
	P(se.avg.utilization_avg_contrib);
	P(se.avg.decay_count);
#endif
	P(policy);
	P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}
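
/*
 * Editor's note: this reset path is reachable from user space via a
 * write to /proc/<pid>/sched (wired up through proc_pid_sched_operations
 * in fs/proc/base.c), e.g.:
 *
 *	echo 0 > /proc/1234/sched
 *
 * which clears the CONFIG_SCHEDSTATS counters printed by
 * proc_sched_show_task() above.
 */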