Lines Matching defs:hwc (occurrences of the local struct hw_perf_event *hwc pointer in the x86 perf event core; each line is prefixed with its source line number)
119 struct hw_perf_event *hwc = &event->hw;
124 if (unlikely(!hwc->event_base))
134 prev_raw_count = local64_read(&hwc->prev_count);
136 rdpmcl(hwc->event_base_rdpmc, new_raw_count);
137 } while (!local64_try_cmpxchg(&hwc->prev_count,
152 local64_sub(delta, &hwc->period_left);
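The matches at 119-152 sit in the lockless counter-update path (the line numbers are consistent with arch/x86/events/core.c). A minimal sketch of the surrounding function, reconstructed from the listed lines; the shift-based sign extension is an assumption from the usual x86 perf-core pattern, not shown in the matches:

	/* Sketch, not verbatim source: fold the hardware count into the event. */
	static u64 hwc_update_sketch(struct perf_event *event)
	{
		struct hw_perf_event *hwc = &event->hw;
		int shift = 64 - x86_pmu.cntval_bits;
		u64 prev_raw_count, new_raw_count;
		s64 delta;

		if (unlikely(!hwc->event_base))		/* line 124: no counter programmed */
			return 0;

		prev_raw_count = local64_read(&hwc->prev_count);	/* line 134 */
		do {
			rdpmcl(hwc->event_base_rdpmc, new_raw_count);	/* line 136 */
		} while (!local64_try_cmpxchg(&hwc->prev_count,		/* line 137 */
					      &prev_raw_count, new_raw_count));

		/*
		 * Re-read until prev_count can be swapped atomically, so a
		 * concurrent NMI that also updates it is never double-counted.
		 * Sign-extend to the real counter width, then account the delta.
		 */
		delta = (new_raw_count << shift) - (prev_raw_count << shift);
		delta >>= shift;

		local64_add(delta, &event->count);
		local64_sub(delta, &hwc->period_left);		/* line 152 */

		return new_raw_count;
	}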
362 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
392 hwc->config |= val;
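set_ext_hw_attr() (lines 362-392) resolves a PERF_TYPE_HW_CACHE attribute into a hardware encoding and merges it into hwc->config. A hedged sketch of that decode-and-lookup shape; hw_cache_event_ids is the usual per-PMU table, and the exact error returns are assumptions:

	static int set_ext_hw_attr_sketch(struct hw_perf_event *hwc,
					  struct perf_event *event)
	{
		u64 config = event->attr.config;
		unsigned int type   = (config >>  0) & 0xff;	/* cache type */
		unsigned int op     = (config >>  8) & 0xff;	/* cache op   */
		unsigned int result = (config >> 16) & 0xff;	/* op result  */
		u64 val;

		if (type >= PERF_COUNT_HW_CACHE_MAX ||
		    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
		    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
			return -EINVAL;

		val = hw_cache_event_ids[type][op][result];
		if (val == 0)
			return -ENOENT;		/* combination not counted on this PMU */
		if (val == -1)
			return -EINVAL;		/* combination explicitly invalid */

		hwc->config |= val;		/* line 392 */
		return 0;
	}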
479 struct hw_perf_event *hwc = &event->hw;
483 hwc->sample_period = x86_pmu.max_period;
484 hwc->last_period = hwc->sample_period;
485 local64_set(&hwc->period_left, hwc->sample_period);
492 return set_ext_hw_attr(hwc, event);
510 hwc->config |= config;
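Lines 479-510 fall in event initialization. For a counting-only event the driver fakes a maximal sample period so the period bookkeeping stays uniform, then merges the resolved config bits. A fragment sketch, assuming the usual is_sampling_event() guard around the period defaults:

	struct hw_perf_event *hwc = &event->hw;			/* line 479 */

	if (!is_sampling_event(event)) {
		/* Counting-only: emulate sampling with the widest period. */
		hwc->sample_period = x86_pmu.max_period;	/* line 483 */
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->attr.type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, event);		/* line 492 */

	/* ... generic/raw events resolve a config value instead ... */
	hwc->config |= config;					/* line 510 */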
685 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
695 if (is_counter_pair(hwc))
742 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
747 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
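Lines 685-747 are the disable-all/enable-all loops: each walks the per-CPU counter slots and touches only those marked active. A sketch of the enable-all side; the loop bound is an assumption (older kernels use x86_pmu.num_counters, newer ones a per-PMU helper):

	static void x86_pmu_enable_all_sketch(int added)
	{
		struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
		int idx;

		for (idx = 0; idx < x86_pmu.num_counters; idx++) {
			struct hw_perf_event *hwc = &cpuc->events[idx]->hw;	/* line 742 */

			if (!test_bit(idx, cpuc->active_mask))
				continue;	/* slot not in use, hwc never dereferenced */

			__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);	/* line 747 */
		}
	}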
984 struct hw_perf_event *hwc;
1029 hwc = &cpuc->event_list[i]->hw;
1033 if (hwc->idx == -1)
1037 if (!test_bit(hwc->idx, c->idxmsk))
1040 mask = BIT_ULL(hwc->idx);
1041 if (is_counter_pair(hwc))
1051 assign[i] = hwc->idx;
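Lines 984-1051 are the scheduler fastpath in x86_schedule_events(): if every event can keep the counter it used last time (still legal per its constraint and not claimed by another event this pass), reprogramming is skipped entirely. A fragment sketch of that loop; c is the event's constraint and used_mask accumulates claimed counters:

	for (i = 0; i < n; i++) {
		u64 mask;

		hwc = &cpuc->event_list[i]->hw;		/* line 1029 */
		c = cpuc->event_constraint[i];

		if (hwc->idx == -1)			/* never assigned: fastpath fails */
			break;

		if (!test_bit(hwc->idx, c->idxmsk))	/* old counter no longer legal */
			break;

		mask = BIT_ULL(hwc->idx);		/* line 1040 */
		if (is_counter_pair(hwc))
			mask |= mask << 1;		/* a paired event claims idx+1 too */

		if (used_mask & mask)			/* someone already took it */
			break;

		used_mask |= mask;

		if (assign)
			assign[i] = hwc->idx;		/* line 1051: keep the old counter */
	}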
1217 struct hw_perf_event *hwc = &event->hw;
1220 idx = hwc->idx = cpuc->assign[i];
1221 hwc->last_cpu = smp_processor_id();
1222 hwc->last_tag = ++cpuc->tags[i];
1226 switch (hwc->idx) {
1229 hwc->config_base = 0;
1230 hwc->event_base = 0;
1238 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1239 hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 +
1241 hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) |
1246 hwc->config_base = x86_pmu_config_addr(hwc->idx);
1247 hwc->event_base = x86_pmu_event_addr(hwc->idx);
1248 hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
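Lines 1217-1248 are x86_assign_hw_event(), which caches the chosen counter's MSR addresses in the hwc so hot paths never recompute them. A condensed sketch; the real function is a switch whose BTS/VLBR slots (config_base and event_base zeroed, lines 1229-1230) and TopDown-metric aliasing are elided here:

	static void x86_assign_hw_event_sketch(struct perf_event *event,
					       struct cpu_hw_events *cpuc, int i)
	{
		struct hw_perf_event *hwc = &event->hw;
		int idx;

		idx = hwc->idx = cpuc->assign[i];	/* line 1220 */
		hwc->last_cpu = smp_processor_id();	/* remember where ...           */
		hwc->last_tag = ++cpuc->tags[i];	/* ... and when it was assigned */

		if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
			/* Fixed counter: control bits live in one shared MSR. */
			hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
			hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 +
					  (idx - INTEL_PMC_IDX_FIXED);
			hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) |
						INTEL_PMC_FIXED_RDPMC_BASE;
		} else {
			/* General-purpose counter: per-counter config/count MSRs. */
			hwc->config_base = x86_pmu_config_addr(hwc->idx);
			hwc->event_base = x86_pmu_event_addr(hwc->idx);
			hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
		}
	}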
1274 static inline int match_prev_assignment(struct hw_perf_event *hwc,
1278 return hwc->idx == cpuc->assign[i] &&
1279 hwc->last_cpu == smp_processor_id() &&
1280 hwc->last_tag == cpuc->tags[i];
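match_prev_assignment() at 1274-1280 is shown almost whole; only the truncated parameter list is missing. Reassembled, with parameter names inferred from the body:

	static inline int match_prev_assignment(struct hw_perf_event *hwc,
						struct cpu_hw_events *cpuc,
						int i)
	{
		/* Same counter, same CPU, and nobody touched the slot since. */
		return hwc->idx == cpuc->assign[i] &&
			hwc->last_cpu == smp_processor_id() &&
			hwc->last_tag == cpuc->tags[i];
	}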
1289 struct hw_perf_event *hwc;
1308 hwc = &event->hw;
1316 if (hwc->idx == -1 ||
1317 match_prev_assignment(hwc, cpuc, i))
1324 if (hwc->state & PERF_HES_STOPPED)
1325 hwc->state |= PERF_HES_ARCH;
1335 hwc = &event->hw;
1337 if (!match_prev_assignment(hwc, cpuc, i))
1342 if (hwc->state & PERF_HES_ARCH)
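Lines 1289-1342 are the two-pass reprogramming in x86_pmu_enable(): pass one stops every event whose old assignment no longer matches, pass two writes the new assignments and restarts whatever was not deliberately stopped. A fragment sketch; n_running and the start/stop helpers follow the usual perf-core convention:

	/* Pass 1: stop events that moved to a different counter. */
	for (i = 0; i < n_running; i++) {
		hwc = &cpuc->event_list[i]->hw;		/* line 1308 */

		if (hwc->idx == -1 ||			/* never ran, nothing to stop */
		    match_prev_assignment(hwc, cpuc, i))
			continue;			/* same counter: leave it alone */

		/* Remember an already-stopped event so pass 2 won't restart it. */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		x86_pmu_stop(cpuc->event_list[i], PERF_EF_UPDATE);
	}

	/* Pass 2: program moved events into their new counters, then restart. */
	for (i = 0; i < cpuc->n_events; i++) {
		hwc = &cpuc->event_list[i]->hw;		/* line 1335 */

		if (!match_prev_assignment(hwc, cpuc, i))
			x86_assign_hw_event(cpuc->event_list[i], cpuc, i);
		else if (i < n_running)
			continue;			/* untouched and still running */

		if (hwc->state & PERF_HES_ARCH)		/* line 1342 */
			continue;			/* was stopped before; stay stopped */

		x86_pmu_start(cpuc->event_list[i], PERF_EF_RELOAD);
	}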
1364 * Set the next IRQ period, based on the hwc->period_left value.
1369 struct hw_perf_event *hwc = &event->hw;
1370 s64 left = local64_read(&hwc->period_left);
1371 s64 period = hwc->sample_period;
1372 int ret = 0, idx = hwc->idx;
1374 if (unlikely(!hwc->event_base))
1382 local64_set(&hwc->period_left, left);
1383 hwc->last_period = period;
1389 local64_set(&hwc->period_left, left);
1390 hwc->last_period = period;
1410 local64_set(&hwc->prev_count, (u64)-left);
1412 wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
1418 if (is_counter_pair(hwc))
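Lines 1364-1418 are x86_perf_event_set_period(): it clamps period_left into range and arms the counter by writing the negated count, so the counter overflows (and raises the PMI) after exactly "left" more increments. A condensed sketch; the far-out-of-range case, the small-left hardware quirk, and the limit_period hook are elided:

	static int set_period_sketch(struct perf_event *event)
	{
		struct hw_perf_event *hwc = &event->hw;
		s64 left = local64_read(&hwc->period_left);	/* line 1370 */
		s64 period = hwc->sample_period;
		int ret = 0, idx = hwc->idx;

		if (unlikely(!hwc->event_base))
			return 0;

		if (unlikely(left <= 0)) {
			/* Period expired: start a fresh one from the remainder. */
			left += period;
			local64_set(&hwc->period_left, left);	/* line 1389 */
			hwc->last_period = period;
			ret = 1;
		}

		if (left > x86_pmu.max_period)
			left = x86_pmu.max_period;

		/* Arm the counter: it counts up from -left toward overflow. */
		local64_set(&hwc->prev_count, (u64)-left);	/* line 1410 */
		wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);

		/* Paired (merge) counters also get their high half primed. */
		if (is_counter_pair(hwc))			/* line 1418 */
			wrmsrl(x86_pmu_event_addr(idx + 1), 0xffff);

		return ret;
	}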
1442 struct hw_perf_event *hwc;
1446 hwc = &event->hw;
1453 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
1455 hwc->state |= PERF_HES_ARCH;
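Lines 1442-1455 are in x86_pmu_add(): a freshly added event starts stopped with nothing pending to read back, and stays parked unless the caller asked for an immediate start. A fragment sketch, assuming the standard PERF_EF_START convention of the ->add() callback:

	hwc = &event->hw;				/* line 1446 */

	/* New events start stopped and up to date. */
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;		/* caller will start it later */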
1587 struct hw_perf_event *hwc = &event->hw;
1589 if (test_bit(hwc->idx, cpuc->active_mask)) {
1591 __clear_bit(hwc->idx, cpuc->active_mask);
1592 cpuc->events[hwc->idx] = NULL;
1593 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1594 hwc->state |= PERF_HES_STOPPED;
1597 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1603 hwc->state |= PERF_HES_UPTODATE;
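Lines 1587-1603 are x86_pmu_stop(): detach the event from its counter slot, then optionally drain the final delta. A sketch; recent kernels route the disable and update calls through static calls, shown here as plain x86_pmu method calls:

	void x86_pmu_stop_sketch(struct perf_event *event, int flags)
	{
		struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
		struct hw_perf_event *hwc = &event->hw;

		if (test_bit(hwc->idx, cpuc->active_mask)) {
			x86_pmu.disable(event);			/* mask the counter */
			__clear_bit(hwc->idx, cpuc->active_mask);
			cpuc->events[hwc->idx] = NULL;		/* line 1592 */
			WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
			hwc->state |= PERF_HES_STOPPED;
		}

		if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
			/* Fold the remaining hardware count into event->count. */
			x86_perf_event_update(event);
			hwc->state |= PERF_HES_UPTODATE;	/* line 1603 */
		}
	}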
2530 struct hw_perf_event *hwc = &event->hw;
2532 if (!(hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
2535 if (is_metric_idx(hwc->idx))
2538 return hwc->event_base_rdpmc + 1;
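Lines 2530-2538 are the ->event_idx() hook that feeds the self-monitoring mmap page: userspace may only rdpmc an event flagged for user reads, and the returned index is biased by one because 0 in the mmap ABI means "no userspace read possible" (userspace subtracts 1 before issuing rdpmc). A sketch:

	static int x86_pmu_event_idx_sketch(struct perf_event *event)
	{
		struct hw_perf_event *hwc = &event->hw;

		if (!(hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
			return 0;			/* userspace rdpmc not allowed */

		if (is_metric_idx(hwc->idx))		/* TopDown metrics alias fixed ctr 3 */
			return INTEL_PMC_FIXED_RDPMC_METRICS + 1;

		return hwc->event_base_rdpmc + 1;	/* 1-based in the mmap ABI */
	}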