/openbmc/linux/drivers/infiniband/core/ |
H A D | uverbs_std_types_counters.c |
    42  struct ib_counters *counters = uobject->object;  in uverbs_free_counters() local
    45  if (atomic_read(&counters->usecnt))  in uverbs_free_counters()
    48  ret = counters->device->ops.destroy_counters(counters);  in uverbs_free_counters()
    51  kfree(counters);  in uverbs_free_counters()
    61  struct ib_counters *counters;  in UVERBS_HANDLER() local
    72  counters = rdma_zalloc_drv_obj(ib_dev, ib_counters);  in UVERBS_HANDLER()
    73  if (!counters)  in UVERBS_HANDLER()
    76  counters->device = ib_dev;  in UVERBS_HANDLER()
    77  counters->uobject = uobj;  in UVERBS_HANDLER()
    78  uobj->object = counters;  in UVERBS_HANDLER()
    [all …]
|
/openbmc/linux/net/netfilter/ |
H A D | xt_connbytes.c |
    30  const struct nf_conn_counter *counters;  in connbytes_mt() local
    40  counters = acct->counter;  in connbytes_mt()
    45  what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets);  in connbytes_mt()
    48  what = atomic64_read(&counters[IP_CT_DIR_REPLY].packets);  in connbytes_mt()
    51  what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets);  in connbytes_mt()
    52  what += atomic64_read(&counters[IP_CT_DIR_REPLY].packets);  in connbytes_mt()
    59  what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);  in connbytes_mt()
    62  what = atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);  in connbytes_mt()
    65  what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);  in connbytes_mt()
    66  what += atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);  in connbytes_mt()
    [all …]
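
The connbytes_mt() hits above select either the packet or the byte totals kept by conntrack accounting, for the original direction, the reply direction, or the sum of both. A minimal sketch of that selection step, with illustrative names rather than the netfilter types:

    #include <stdint.h>

    enum ct_dir  { DIR_ORIGINAL, DIR_REPLY, DIR_BOTH };
    enum ct_what { WHAT_PKTS, WHAT_BYTES };

    struct ct_counter { uint64_t packets, bytes; };

    /* Pick the 64-bit quantity the match compares against its bounds. */
    static uint64_t connbytes_pick(const struct ct_counter c[2],
                                   enum ct_dir dir, enum ct_what what)
    {
        uint64_t orig  = (what == WHAT_PKTS) ? c[0].packets : c[0].bytes;
        uint64_t reply = (what == WHAT_PKTS) ? c[1].packets : c[1].bytes;

        switch (dir) {
        case DIR_ORIGINAL: return orig;
        case DIR_REPLY:    return reply;
        default:           return orig + reply;   /* both directions */
        }
    }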
|
/openbmc/linux/lib/ |
H A D | percpu_counter.c |
     3  * Fast batching percpu counters.
    67  s32 *pcount = per_cpu_ptr(fbc->counters, cpu);  in percpu_counter_set()
    83  * the this_cpu_add(), and the interrupt updates this_cpu(*fbc->counters),
    93  count = __this_cpu_read(*fbc->counters) + amount;  in percpu_counter_add_batch()
    97  __this_cpu_sub(*fbc->counters, count - amount);  in percpu_counter_add_batch()
   100  this_cpu_add(*fbc->counters, amount);  in percpu_counter_add_batch()
   118  count = __this_cpu_read(*fbc->counters);  in percpu_counter_sync()
   120  __this_cpu_sub(*fbc->counters, count);  in percpu_counter_sync()
   146  s32 *pcount = per_cpu_ptr(fbc->counters, cpu);  in __percpu_counter_sum()
   160  s32 __percpu *counters;  in __percpu_counter_init_many() local
    [all …]
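
The percpu_counter_add_batch() hits above show the batching idea: each CPU accumulates into a small private delta and folds it into the shared 64-bit count, under the lock, only when the delta reaches the batch size; an exact-ish total is obtained by adding every CPU's delta to the shared count. A rough user-space sketch of the same scheme, using one atomic slot per thread in place of the kernel's per-CPU machinery (illustrative, not the kernel API):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>

    struct pcounter {
        pthread_mutex_t lock;
        int64_t count;              /* shared total, approximate between folds */
        _Atomic int32_t *counters;  /* one small delta per thread ("CPU") */
        int nr_slots;
    };

    /* Only the owning thread updates its slot, mirroring per-CPU data. */
    static void pcounter_add_batch(struct pcounter *fbc, int slot,
                                   int64_t amount, int32_t batch)
    {
        int64_t count = atomic_load(&fbc->counters[slot]) + amount;

        if (count >= batch || count <= -batch) {
            pthread_mutex_lock(&fbc->lock);
            fbc->count += count;                    /* fold the local delta */
            atomic_store(&fbc->counters[slot], 0);
            pthread_mutex_unlock(&fbc->lock);
        } else {
            atomic_store(&fbc->counters[slot], (int32_t)count);
        }
    }

    /* Slow path: shared count plus every outstanding per-slot delta. */
    static int64_t pcounter_sum(struct pcounter *fbc)
    {
        pthread_mutex_lock(&fbc->lock);
        int64_t ret = fbc->count;
        for (int i = 0; i < fbc->nr_slots; i++)
            ret += atomic_load(&fbc->counters[i]);
        pthread_mutex_unlock(&fbc->lock);
        return ret;
    }

The point of the split is that frequent updates stay cache-local and lock-free; only the occasional fold and the explicit sum touch shared state.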
|
/openbmc/linux/tools/perf/ |
H A D | design.txt |
     2  Performance Counters for Linux
     5  Performance counters are special hardware registers available on most modern
    13  hardware capabilities. It provides per task and per CPU counters, counter
    15  provides "virtual" 64-bit counters, regardless of the width of the
    16  underlying hardware counters.
    18  Performance counters are accessed via special file descriptors.
    32  Multiple counters can be kept open at a time, and the counters
   115  on all CPUs that implement Performance Counters support under Linux,
   130  * Special "software" counters provided by the kernel, even if the hardware
   131  * does not support performance counters. These counters measure various
    [all …]
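
As the design notes say, each counter is exposed to user space as a file descriptor. A minimal example of that model using the current perf_event_open(2) interface, counting CPU cycles for the calling task (error handling trimmed):

    #include <linux/perf_event.h>
    #include <asm/unistd.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        /* pid = 0 (this task), cpu = -1 (any CPU), no group, no flags */
        int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

        for (volatile int i = 0; i < 1000000; i++)
            ;                               /* the work being measured */

        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        uint64_t cycles;
        if (read(fd, &cycles, sizeof(cycles)) == sizeof(cycles))
            printf("cycles: %llu\n", (unsigned long long)cycles);
        close(fd);
        return 0;
    }

Note that design.txt predates the final ABI; the attribute structure it describes later became struct perf_event_attr.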
|
/openbmc/linux/Documentation/arch/arm64/ |
H A D | amu.rst |
    22  counters intended for system management use. The AMU extension provides a
    27  of four fixed and architecturally defined 64-bit event counters.
    37  When in WFI or WFE these counters do not increment.
    40  event counters. Future versions of the architecture may use this space to
    41  implement additional architected event counters.
    44  64-bit event counters.
    46  On cold reset all counters reset to 0.
    59  counters, only the presence of the extension.
    66  - Enable the counters. If not enabled these will read as 0.
    67  - Save/restore the counters before/after the CPU is being put/brought up
    [all …]
|
/openbmc/linux/Documentation/translations/zh_CN/core-api/ |
H A D | local_ops.rst |
    93  static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);
   105  local_inc(&get_cpu_var(counters));
   106  put_cpu_var(counters);
   110  local_inc(this_cpu_ptr(&counters));
   123  sum += local_read(&per_cpu(counters, cpu));
   143  static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);
   152  local_inc(this_cpu_ptr(&counters));
   157  * local_inc(&get_cpu_var(counters));
   158  * put_cpu_var(counters);
   166  /* Increment the counters */
    [all …]
|
/openbmc/linux/Documentation/core-api/ |
H A D | local_ops.rst |
    30  counters. They minimize the performance cost of standard atomic operations by
    34  Having fast per CPU atomic counters is interesting in many cases: it does not
    36  coherent counters in NMI handlers. It is especially useful for tracing purposes
    37  and for various performance monitoring counters.
    95  static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);
   107  local_inc(&get_cpu_var(counters));
   108  put_cpu_var(counters);
   113  local_inc(this_cpu_ptr(&counters));
   117  Reading the counters
   120  Those local counters can be read from foreign CPUs to sum the count. Note that
    [all …]
|
/openbmc/linux/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/ |
H A D | counters.rst |
     5  Ethtool counters
    22  addition, each group of counters may have different counter types.
    53  | Uplink (no counters) |
    58  | MPFS (no counters) |
    68  Software counters populated by the driver stack.
    71  An aggregation of software ring counters.
    73  vPort counters
    74  Traffic counters and drops due to steering or no buffers. May indicate issues
    75  with NIC. These counters include Ethernet traffic counters (including Raw
    76  Ethernet) and RDMA/RoCE traffic counters.
    [all …]
|
/openbmc/linux/drivers/net/ethernet/aquantia/atlantic/macsec/ |
H A D | macsec_api.h |
   265  /*! Read the counters for the specified SC, and unpack them into the
   266  * fields of counters.
   267  * counters - [OUT] The raw table row data will be unpacked here.
   271  struct aq_mss_egress_sc_counters *counters,
   274  /*! Read the counters for the specified SA, and unpack them into the
   275  * fields of counters.
   276  * counters - [OUT] The raw table row data will be unpacked here.
   280  struct aq_mss_egress_sa_counters *counters,
   283  /*! Read the counters for the common egress counters, and unpack them
   284  * into the fields of counters.
    [all …]
|
H A D | macsec_api.c |
  1825  struct aq_mss_egress_sc_counters *counters,  in get_egress_sc_counters() argument
  1837  counters->sc_protected_pkts[0] =  in get_egress_sc_counters()
  1839  counters->sc_protected_pkts[1] =  in get_egress_sc_counters()
  1845  counters->sc_encrypted_pkts[0] =  in get_egress_sc_counters()
  1847  counters->sc_encrypted_pkts[1] =  in get_egress_sc_counters()
  1853  counters->sc_protected_octets[0] =  in get_egress_sc_counters()
  1855  counters->sc_protected_octets[1] =  in get_egress_sc_counters()
  1861  counters->sc_encrypted_octets[0] =  in get_egress_sc_counters()
  1863  counters->sc_encrypted_octets[1] =  in get_egress_sc_counters()
  1870  struct aq_mss_egress_sc_counters *counters,  in aq_mss_get_egress_sc_counters() argument
    [all …]
|
/openbmc/linux/include/linux/ |
H A D | nfs_iostat.h |
    13  * These counters are not meant to be human-readable, but are meant
    15  * "iostat". As such, the counters are sampled by the tools over
    28  * NFS byte counters
    40  * These counters give a view of the data throughput into and out
    46  * These counters can also help characterize which access methods
    53  * NFS page counters
    58  * NB: When adding new byte counters, please include the measured
    75  * NFS event counters
    77  * These counters provide a low-overhead way of monitoring client
    78  * activity without enabling NFS trace debugging. The counters
|
/openbmc/linux/drivers/perf/amlogic/ |
H A D | meson_ddr_pmu_core.c |
    23  struct dmc_counter counters; /* save counters from hw */  member
    93  /* get the remain counters in register. */  in meson_ddr_perf_event_update()
    96  ddr_cnt_addition(&sum_dc, &pmu->counters, &dc, chann_nr);  in meson_ddr_perf_event_update()
   147  memset(&pmu->counters, 0, sizeof(pmu->counters));  in meson_ddr_perf_event_start()
   351  struct dmc_counter counters, *sum_cnter;  in dmc_irq_handler() local
   356  if (info->hw_info->irq_handler(info, &counters) != 0)  in dmc_irq_handler()
   359  sum_cnter = &pmu->counters;  in dmc_irq_handler()
   360  sum_cnter->all_cnt += counters.all_cnt;  in dmc_irq_handler()
   361  sum_cnter->all_req += counters.all_req;  in dmc_irq_handler()
   364  sum_cnter->channel_cnt[i] += counters.channel_cnt[i];  in dmc_irq_handler()
    [all …]
|
/openbmc/linux/tools/perf/tests/shell/ |
H A D | stat_bpf_counters.sh |
     2  # perf stat --bpf-counters test
    24  # skip if --bpf-counters is not supported
    25  if ! perf stat -e cycles --bpf-counters true > /dev/null 2>&1; then
    27  echo "Skipping: --bpf-counters not supported"
    28  perf --no-pager stat -e cycles --bpf-counters true || true
    38  bpf_cycles=$(perf stat --no-big-num --bpf-counters -e cycles -- perf bench sched messaging -g 1 -l …
    40  echo "Failed: cycles not counted with --bpf-counters"
|
H A D | stat_bpf_counters_cgrp.sh |
     2  # perf stat --bpf-counters --for-each-cgroup test
    12  # skip if --bpf-counters --for-each-cgroup is not supported
    15  if ! perf stat -a --bpf-counters --for-each-cgroup / true > /dev/null 2>&1; then
    17  echo "Skipping: --bpf-counters --for-each-cgroup not supported"
    18  perf --no-pager stat -a --bpf-counters --for-each-cgroup / true || true
    51  …check_system_wide_counted_output=$(perf stat -a --bpf-counters --for-each-cgroup ${test_cgroups} -…
    63  …check_cpu_list_counted_output=$(perf stat -C 0,1 --bpf-counters --for-each-cgroup ${test_cgroups} …
|
/openbmc/linux/fs/xfs/scrub/ |
H A D | fscounters.c |
    27  * FS Summary Counters
    33  * Then we compare what we computed against the in-core counters.
    35  * However, the reality is that summary counters are a tricky beast to check.
    46  * structures as quickly as it can. We snapshot the percpu counters before and
    64  * values, the percpu counters should be fairly close to each other. However,
    71  * contents and trust that the incore counters match the ondisk counters. (The
    73  * summary counters after checking all AG headers). Do this from the setup
   155  * that comprise the summary counters and compare them to the percpu counters.
   229  /* We must get the incore counters set up before we can proceed. */  in xchk_setup_fscounters()
   236  * reduce the likelihood of background perturbations to the counters  in xchk_setup_fscounters()
    [all …]
|
/openbmc/linux/net/ipv4/netfilter/ |
H A D | arp_tables.c |
   230  counter = xt_get_this_cpu_counter(&e->counters);  in arpt_do_table()
   319  e->counters.pcnt = pos;  in mark_source_chains()
   345  pos = e->counters.pcnt;  in mark_source_chains()
   346  e->counters.pcnt = 0;  in mark_source_chains()
   360  e->counters.pcnt = pos;  in mark_source_chains()
   379  e->counters.pcnt = pos;  in mark_source_chains()
   413  if (!xt_percpu_counter_alloc(alloc_state, &e->counters))  in find_check_entry()
   432  xt_percpu_counter_free(&e->counters);  in find_check_entry()
   494  /* Clear counters and comefrom */  in check_entry_size_and_hooks()
   495  e->counters = ((struct xt_counters) { 0, 0 });  in check_entry_size_and_hooks()
    [all …]
|
H A D | ip_tables.c |
   297  counter = xt_get_this_cpu_counter(&e->counters);  in ipt_do_table()
   383  e->counters.pcnt = pos;  in mark_source_chains()
   407  pos = e->counters.pcnt;  in mark_source_chains()
   408  e->counters.pcnt = 0;  in mark_source_chains()
   422  e->counters.pcnt = pos;  in mark_source_chains()
   441  e->counters.pcnt = pos;  in mark_source_chains()
   526  if (!xt_percpu_counter_alloc(alloc_state, &e->counters))  in find_check_entry()
   566  xt_percpu_counter_free(&e->counters);  in find_check_entry()
   629  /* Clear counters and comefrom */  in check_entry_size_and_hooks()
   630  e->counters = ((struct xt_counters) { 0, 0 });  in check_entry_size_and_hooks()
    [all …]
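
Both the arp_tables and ip_tables hits rely on per-rule, per-CPU packet/byte counters: the packet path bumps only the current CPU's pair (xt_get_this_cpu_counter()), and user-visible totals are produced later by summing the pairs across CPUs. A hedged sketch of that layout with made-up names, not the xtables helpers:

    #include <stdint.h>

    struct rule_counter { uint64_t bytes, packets; };

    /* One flat array holding nr_cpus * nr_rules counter pairs. */
    struct rule_counters {
        int nr_cpus, nr_rules;
        struct rule_counter *percpu;
    };

    static inline struct rule_counter *
    cpu_rule_counter(struct rule_counters *rc, int cpu, int rule)
    {
        return &rc->percpu[cpu * rc->nr_rules + rule];
    }

    /* Hot path: touch only the local CPU's slot, no atomics, no sharing. */
    static inline void rule_hit(struct rule_counters *rc, int cpu, int rule,
                                uint64_t pkt_len)
    {
        struct rule_counter *c = cpu_rule_counter(rc, cpu, rule);

        c->bytes += pkt_len;
        c->packets++;
    }

    /* Slow path (rule listing): fold every CPU's slot into one total. */
    static struct rule_counter rule_total(struct rule_counters *rc, int rule)
    {
        struct rule_counter sum = { 0, 0 };

        for (int cpu = 0; cpu < rc->nr_cpus; cpu++) {
            sum.bytes   += cpu_rule_counter(rc, cpu, rule)->bytes;
            sum.packets += cpu_rule_counter(rc, cpu, rule)->packets;
        }
        return sum;
    }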
|
/openbmc/linux/Documentation/admin-guide/perf/ |
H A D | alibaba_pmu.rst |
    23  Each sub-channel has 36 PMU counters in total, which is classified into
    26  - Group 0: PMU Cycle Counter. This group has one pair of counters
    30  - Group 1: PMU Bandwidth Counters. This group has 8 counters that are used
    32  selected rank, or four ranks separately in the first 4 counters. The base
    35  - Group 2: PMU Retry Counters. This group has 10 counters, that intend to
    38  - Group 3: PMU Common Counters. This group has 16 counters, that are used
    41  For now, the Driveway PMU driver only uses counters in group 0 and group 3.
|
/openbmc/linux/net/ipv6/netfilter/ |
H A D | ip6_tables.c |
   320  counter = xt_get_this_cpu_counter(&e->counters);  in ip6t_do_table()
   401  e->counters.pcnt = pos;  in mark_source_chains()
   425  pos = e->counters.pcnt;  in mark_source_chains()
   426  e->counters.pcnt = 0;  in mark_source_chains()
   440  e->counters.pcnt = pos;  in mark_source_chains()
   459  e->counters.pcnt = pos;  in mark_source_chains()
   545  if (!xt_percpu_counter_alloc(alloc_state, &e->counters))  in find_check_entry()
   584  xt_percpu_counter_free(&e->counters);  in find_check_entry()
   647  /* Clear counters and comefrom */  in check_entry_size_and_hooks()
   648  e->counters = ((struct xt_counters) { 0, 0 });  in check_entry_size_and_hooks()
    [all …]
|
/openbmc/linux/tools/perf/util/ |
H A D | values.c |
    30  pr_debug("failed to allocate read_values counters arrays");  in perf_read_values_init()
    33  values->counters = 0;  in perf_read_values_init()
    60  for (i = 0; i < values->counters; i++)  in perf_read_values_destroy()
   107  pr_debug("failed to allocate read_values counters array");  in perf_read_values__findnew_thread()
   167  for (i = 0; i < values->counters; i++)  in perf_read_values__findnew_counter()
   171  if (values->counters == values->counters_max) {  in perf_read_values__findnew_counter()
   177  i = values->counters++;  in perf_read_values__findnew_counter()
   208  counterwidth = malloc(values->counters * sizeof(*counterwidth));  in perf_read_values__display_pretty()
   215  for (j = 0; j < values->counters; j++)  in perf_read_values__display_pretty()
   226  for (j = 0; j < values->counters; j++) {  in perf_read_values__display_pretty()
    [all …]
|
/openbmc/linux/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/ |
H A D | tc_flower_scale.sh |
    11  # counters.
    12  max_cnts=$(devlink_resource_size_get counters flow)
    14  # Remove already allocated counters.
    15  ((max_cnts -= $(devlink_resource_occ_get counters flow)))
    17  # Each rule uses two counters, for packets and bytes.
|
/openbmc/linux/kernel/gcov/ |
H A D | gcc_base.c |
    46  void __gcov_merge_add(gcov_type *counters, unsigned int n_counters)  in __gcov_merge_add() argument
    52  void __gcov_merge_single(gcov_type *counters, unsigned int n_counters)  in __gcov_merge_single() argument
    58  void __gcov_merge_delta(gcov_type *counters, unsigned int n_counters)  in __gcov_merge_delta() argument
    64  void __gcov_merge_ior(gcov_type *counters, unsigned int n_counters)  in __gcov_merge_ior() argument
    70  void __gcov_merge_time_profile(gcov_type *counters, unsigned int n_counters)  in __gcov_merge_time_profile() argument
    76  void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters)  in __gcov_merge_icall_topn() argument
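
In the kernel these are stub hooks for merge symbols that gcc's profiling instrumentation references; the actual merging happens in the user-space gcov runtime, where the "-add" variant simply accumulates saved counters into the live ones. A conceptual sketch of that merge (the real runtime streams the saved values out of the .gcda file rather than taking a second array):

    typedef long long gcov_type;

    /* Element-wise accumulation: what "merge add" amounts to. */
    static void gcov_merge_add_sketch(gcov_type *counters,
                                      const gcov_type *saved,
                                      unsigned int n_counters)
    {
        for (unsigned int i = 0; i < n_counters; i++)
            counters[i] += saved[i];
    }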
|
/openbmc/linux/Documentation/admin-guide/device-mapper/ |
H A D | statistics.rst |
    14  The I/O statistics counters for each step-sized area of a region are
    16  Documentation/admin-guide/iostats.rst). But two extra counters (12 and 13) are
    19  histogram of latencies. All these counters may be accessed by sending
   111  Clear all the counters except the in-flight i/o counters.
   133  Print counters for each step-sized area of a region.
   149  counters
   151  The first 11 counters have the same meaning as
   168  Additional counters:
   174  Atomically print and then clear all the counters except the
   175  in-flight i/o counters. Useful when the client consuming the
|
/openbmc/linux/drivers/md/ |
H A D | md-faulty.c |
    78  atomic_t counters[Modes];  member
    88  atomic_read(&conf->counters[mode]) <= 0)  in check_mode()
    92  if (atomic_dec_and_test(&conf->counters[mode])) {  in check_mode()
    94  atomic_set(&conf->counters[mode], conf->period[mode]);  in check_mode()
   171  if (atomic_read(&conf->counters[WriteAll])) {  in faulty_make_request()
   228  if ((n=atomic_read(&conf->counters[WriteTransient])) != 0)  in faulty_status()
   232  if ((n=atomic_read(&conf->counters[ReadTransient])) != 0)  in faulty_status()
   236  if ((n=atomic_read(&conf->counters[WritePersistent])) != 0)  in faulty_status()
   240  if ((n=atomic_read(&conf->counters[ReadPersistent])) != 0)  in faulty_status()
   245  if ((n=atomic_read(&conf->counters[ReadFixable])) != 0)  in faulty_status()
    [all …]
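
The check_mode() hits above implement "inject a fault every Nth request": an atomic countdown is decremented per event and, once it reaches zero, the fault fires and the counter is rearmed to the configured period. A small sketch of the same pattern using C11 atomics in place of the kernel's atomic_t (names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct fault_mode {
        atomic_int remaining;   /* events left until the next injected fault */
        int period;             /* rearm value after firing; 0 = one-shot */
    };

    /* Returns true when the current event should have the fault injected. */
    static bool fault_should_fire(struct fault_mode *m)
    {
        if (atomic_load(&m->remaining) <= 0)
            return false;                               /* exhausted or disabled */

        if (atomic_fetch_sub(&m->remaining, 1) == 1) {  /* countdown hit zero */
            if (m->period)
                atomic_store(&m->remaining, m->period); /* rearm for the next period */
            return true;
        }
        return false;
    }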
|
/openbmc/u-boot/arch/arm/mach-tegra/ |
H A D | ivc.c |
    26  * established state, indicating that has cleared the counters in our
    33  * allowed to clear the counters it owns asynchronously with respect to
    43  * return to the established state once it has cleared its counters.
   151  * Invalid cases where the counters indicate that the queue is over  in tegra_ivc_channel_full()
   187  * transmit counters until we've acknowledged its synchronization  in tegra_ivc_check_read()
   340  * SYNC  ACK   reset counters; move to EST; notify
   341  * SYNC  SYNC  reset counters; move to ACK; notify
   344  * ACK   SYNC  reset counters; move to ACK; notify
   347  * EST   SYNC  reset counters; move to ACK; notify
   369  * Reset tx_channel counters. The remote end is in the SYNC  in tegra_ivc_channel_notified()
    [all …]
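
The comment table above captures the connection handshake: a side that sees its peer in SYNC resets the counters it owns and answers with ACK, and the link reaches the established state only once the peer has acknowledged in turn. A compressed sketch of those visible transitions as a plain state machine (illustrative, not the tegra_ivc code):

    enum ivc_state { IVC_SYNC, IVC_ACK, IVC_EST };

    struct ivc_end {
        enum ivc_state state;
        unsigned int w_count, r_count;   /* queue counters owned by this end */
    };

    /* Returns nonzero when the peer must be notified of our state change. */
    static int ivc_peer_state_changed(struct ivc_end *local, enum ivc_state remote)
    {
        if (remote == IVC_SYNC) {
            /* Peer restarted the handshake: clear our counters, acknowledge. */
            local->w_count = local->r_count = 0;
            local->state = IVC_ACK;
            return 1;
        }
        if (local->state == IVC_SYNC && remote == IVC_ACK) {
            /* Peer saw our SYNC and cleared its side: move to established. */
            local->w_count = local->r_count = 0;
            local->state = IVC_EST;
            return 1;
        }
        return 0;                        /* remaining combinations: no action here */
    }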
|