Commit 794236c

perf/x86/intel: Support the PEBS event mask
JIRA: https://issues.redhat.com/browse/RHEL-20059

upstream
========
commit a23eb2f
Author: Kan Liang <kan.liang@linux.intel.com>
Date:   Wed Jun 26 07:35:33 2024 -0700

description
===========
The current perf assumes that the counters that support PEBS are contiguous,
but that is not guaranteed once the new leaf 0x23 is introduced. The counters
are enumerated with a counter mask, and there may be holes in that mask on
future platforms or in a virtualization environment.

Store the PEBS event mask rather than the maximum number of PEBS counters in
the x86 PMU structures.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Ian Rogers <irogers@google.com>
Link: https://lkml.kernel.org/r/20240626143545.480761-2-kan.liang@linux.intel.com
Signed-off-by: Michael Petlan <mpetlan@redhat.com>
1 parent 9063058 commit 794236c
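The central point of the change is that a bitmask can describe PEBS-capable counters even when they are not contiguous, while a plain maximum count cannot. Below is a small illustrative userspace sketch, not kernel code and with a made-up mask value: for a mask with a hole, the population count gives the number of usable counters, an fls()-style query gives the scan bound, and any per-counter decision has to consult the mask itself, which is exactly what storing pebs_events_mask enables.

/*
 * Illustrative userspace sketch, not kernel code: with a hole in the
 * counter mask, a plain "number of PEBS counters" cannot say which
 * counters actually support PEBS, but a mask can.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical enumeration: counters 0, 1 and 3 support PEBS; bit 2 is a hole. */
	uint64_t pebs_events_mask = 0x0bULL;

	/* Number of usable PEBS counters: population count of the mask. */
	int num_pebs = __builtin_popcountll(pebs_events_mask);

	/* Highest PEBS-capable index plus one: what the kernel's fls() would return. */
	int max_pebs = 64 - __builtin_clzll(pebs_events_mask);

	printf("usable PEBS counters: %d, scan bound: %d\n", num_pebs, max_pebs);	/* 3, 4 */

	/* A per-counter check must consult the mask, not compare against a count. */
	for (int bit = 0; bit < max_pebs; bit++)
		if (pebs_events_mask & (1ULL << bit))
			printf("counter %d supports PEBS\n", bit);	/* 0, 1, 3 */
	return 0;
}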

4 files changed, 26 insertions(+), 13 deletions(-)
arch/x86/events/intel/core.c

4 additions, 4 deletions

@@ -4727,7 +4727,7 @@ static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
 {
 	intel_pmu_check_num_counters(&pmu->num_counters, &pmu->num_counters_fixed,
 				     &pmu->intel_ctrl, (1ULL << pmu->num_counters_fixed) - 1);
-	pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
+	pmu->pebs_events_mask = intel_pmu_pebs_mask(GENMASK_ULL(pmu->num_counters - 1, 0));
 	pmu->unconstrained = (struct event_constraint)
 		__EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
 				   0, pmu->num_counters, 0, 0);
@@ -6076,7 +6076,7 @@ static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
 
 		pmu->num_counters = x86_pmu.num_counters;
 		pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
-		pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
+		pmu->pebs_events_mask = intel_pmu_pebs_mask(GENMASK_ULL(pmu->num_counters - 1, 0));
 		pmu->unconstrained = (struct event_constraint)
 			__EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
 					   0, pmu->num_counters, 0, 0);
@@ -6199,7 +6199,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.events_maskl = ebx.full;
 		x86_pmu.events_mask_len = eax.split.mask_length;
 
-		x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
+		x86_pmu.pebs_events_mask = intel_pmu_pebs_mask(GENMASK_ULL(x86_pmu.num_counters - 1, 0));
 		x86_pmu.pebs_capable = PEBS_COUNTER_MASK;
 
 		/*
@@ -6828,7 +6828,7 @@ __init int intel_pmu_init(void)
 			pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
 		}
 
-		pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
+		pmu->pebs_events_mask = intel_pmu_pebs_mask(GENMASK_ULL(pmu->num_counters - 1, 0));
 		pmu->unconstrained = (struct event_constraint)
 			__EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
 					   0, pmu->num_counters, 0, 0);
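For today's contiguous general-purpose counters the new initialization is equivalent to the old min_t() computation: GENMASK_ULL(num_counters - 1, 0) builds a contiguous mask and intel_pmu_pebs_mask() clamps it to the 32 architectural PEBS event bits. A standalone sketch of that arithmetic follows; the GENMASK_ULL_DEMO macro and the counter count are stand-ins for the kernel's definitions, not taken from the patch.

/*
 * Illustrative userspace sketch, not kernel code: the equivalent of
 * pmu->pebs_events_mask = intel_pmu_pebs_mask(GENMASK_ULL(pmu->num_counters - 1, 0));
 * GENMASK_ULL_DEMO mimics the kernel's GENMASK_ULL(high, low).
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL_DEMO(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define MAX_PEBS_EVENTS_DEMO		32
#define MAX_PEBS_EVENTS_MASK_DEMO	GENMASK_ULL_DEMO(MAX_PEBS_EVENTS_DEMO - 1, 0)

int main(void)
{
	int num_counters = 8;	/* hypothetical general-purpose counter count */

	/* Contiguous counters give a contiguous mask, clamped to the PEBS field width. */
	uint64_t pebs_events_mask =
		GENMASK_ULL_DEMO(num_counters - 1, 0) & MAX_PEBS_EVENTS_MASK_DEMO;

	printf("pebs_events_mask = %#llx\n",
	       (unsigned long long)pebs_events_mask);	/* 0xff */
	return 0;
}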

arch/x86/events/intel/ds.c

8 additions, 7 deletions

@@ -1136,7 +1136,7 @@ void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
 static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
 {
 	struct debug_store *ds = cpuc->ds;
-	int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events);
+	int max_pebs_events = intel_pmu_max_num_pebs(cpuc->pmu);
 	int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
 	u64 threshold;
 	int reserved;
@@ -2156,6 +2156,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_data *data)
 	void *base, *at, *top;
 	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
 	short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
+	int max_pebs_events = intel_pmu_max_num_pebs(NULL);
 	int bit, i, size;
 	u64 mask;
 
@@ -2167,8 +2168,8 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_data *data)
 
 	ds->pebs_index = ds->pebs_buffer_base;
 
-	mask = (1ULL << x86_pmu.max_pebs_events) - 1;
-	size = x86_pmu.max_pebs_events;
+	mask = x86_pmu.pebs_events_mask;
+	size = max_pebs_events;
 	if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
 		mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
 		size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
@@ -2207,8 +2208,9 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_data *data)
 			pebs_status = p->status = cpuc->pebs_enabled;
 
 		bit = find_first_bit((unsigned long *)&pebs_status,
-				     x86_pmu.max_pebs_events);
-		if (bit >= x86_pmu.max_pebs_events)
+				     max_pebs_events);
+
+		if (!(x86_pmu.pebs_events_mask & (1 << bit)))
 			continue;
 
 		/*
@@ -2266,7 +2268,6 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_data *data)
 {
 	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
-	int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events);
 	int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
 	struct debug_store *ds = cpuc->ds;
 	struct perf_event *event;
@@ -2282,7 +2283,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_data *data)
 
 	ds->pebs_index = ds->pebs_buffer_base;
 
-	mask = ((1ULL << max_pebs_events) - 1) |
+	mask = hybrid(cpuc->pmu, pebs_events_mask) |
 	       (((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
 	size = INTEL_PMC_IDX_FIXED + num_counters_fixed;
 

arch/x86/events/perf_event.h

13 additions, 2 deletions

@@ -684,7 +684,7 @@ struct x86_hybrid_pmu {
 	cpumask_t			supported_cpus;
 	union perf_capabilities		intel_cap;
 	u64				intel_ctrl;
-	int				max_pebs_events;
+	u64				pebs_events_mask;
 	int				num_counters;
 	int				num_counters_fixed;
 	struct event_constraint		unconstrained;
@@ -852,7 +852,7 @@ struct x86_pmu {
 			pebs_ept		:1;
 	int		pebs_record_size;
 	int		pebs_buffer_size;
-	int		max_pebs_events;
+	u64		pebs_events_mask;
 	void		(*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data);
 	struct event_constraint *pebs_constraints;
 	void		(*pebs_aliases)(struct perf_event *event);
@@ -1648,6 +1648,17 @@ static inline int is_ht_workaround_enabled(void)
 	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
 }
 
+static inline u64 intel_pmu_pebs_mask(u64 cntr_mask)
+{
+	return MAX_PEBS_EVENTS_MASK & cntr_mask;
+}
+
+static inline int intel_pmu_max_num_pebs(struct pmu *pmu)
+{
+	static_assert(MAX_PEBS_EVENTS == 32);
+	return fls((u32)hybrid(pmu, pebs_events_mask));
+}
+
 #else /* CONFIG_CPU_SUP_INTEL */
 
 static inline void reserve_ds_buffers(void)

arch/x86/include/asm/intel_ds.h

1 addition, 0 deletions

@@ -9,6 +9,7 @@
 /* The maximal number of PEBS events: */
 #define MAX_PEBS_EVENTS_FMT4		8
 #define MAX_PEBS_EVENTS			32
+#define MAX_PEBS_EVENTS_MASK		GENMASK_ULL(MAX_PEBS_EVENTS - 1, 0)
 #define MAX_FIXED_PEBS_EVENTS		16
 
 /*
