
Commit f47eff1

perf/x86/intel: Support Perfmon MSRs aliasing
JIRA: https://issues.redhat.com/browse/RHEL-20059

upstream
========
commit 149fd47
Author: Kan Liang <kan.liang@linux.intel.com>
Date:   Wed Jun 26 07:35:40 2024 -0700

description
===========
The architectural performance monitoring V6 supports a new range of
counters' MSRs in the 19xxH address range. They include all the GP
counter MSRs, the GP control MSRs, and the fixed counter MSRs.

The step between each sibling counter is 4. Add intel_pmu_addr_offset()
to calculate the correct offset.

Add fixedctr in struct x86_pmu to store the address of the fixed
counter 0. It can be used to calculate the rest of the fixed counters.

The MSR address of the fixed counter control is not changed.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Ian Rogers <irogers@google.com>
Link: https://lkml.kernel.org/r/20240626143545.480761-9-kan.liang@linux.intel.com
Signed-off-by: Michael Petlan <mpetlan@redhat.com>
1 parent 80dac20 commit f47eff1
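For orientation, here is a minimal userspace sketch (illustrative only, not part of the patch) of the address arithmetic the description spells out: with a step of 4 between sibling counters, GP counter N sits at 0x1900 + 4*N, its control register at 0x1901 + 4*N, and fixed counter N at 0x1980 + 4*N. The constants mirror the ones added to msr-index.h below.

/* Illustrative only: the V6 Perfmon MSR aliasing layout described above. */
#include <stdio.h>

#define MSR_IA32_PMC_V6_GP0_CTR		0x1900	/* GP counter 0 value    */
#define MSR_IA32_PMC_V6_GP0_CFG_A	0x1901	/* GP counter 0 control  */
#define MSR_IA32_PMC_V6_FX0_CTR		0x1980	/* fixed counter 0       */
#define MSR_IA32_PMC_V6_STEP		4	/* step between siblings */

int main(void)
{
	for (int idx = 0; idx < 4; idx++)
		printf("idx %d: GP ctr 0x%x, GP cfg 0x%x, fixed ctr 0x%x\n", idx,
		       MSR_IA32_PMC_V6_GP0_CTR + idx * MSR_IA32_PMC_V6_STEP,
		       MSR_IA32_PMC_V6_GP0_CFG_A + idx * MSR_IA32_PMC_V6_STEP,
		       MSR_IA32_PMC_V6_FX0_CTR + idx * MSR_IA32_PMC_V6_STEP);
	return 0;
}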

4 files changed: +32, -5 lines

arch/x86/events/core.c

Lines changed: 3 additions & 4 deletions
@@ -1236,8 +1236,7 @@ static inline void x86_assign_hw_event(struct perf_event *event,
 		fallthrough;
 	case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS-1:
 		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
-		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 +
-				(idx - INTEL_PMC_IDX_FIXED);
+		hwc->event_base = x86_pmu_fixed_ctr_addr(idx - INTEL_PMC_IDX_FIXED);
 		hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) |
 					INTEL_PMC_FIXED_RDPMC_BASE;
 		break;
@@ -1573,7 +1572,7 @@ void perf_event_print_debug(void)
 		for_each_set_bit(idx, fixed_cntr_mask, X86_PMC_IDX_MAX) {
 			if (fixed_counter_disabled(idx, cpuc->pmu))
 				continue;
-			rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
+			rdmsrl(x86_pmu_fixed_ctr_addr(idx), pmc_count);

 			pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
 				cpu, idx, pmc_count);
@@ -2483,7 +2482,7 @@ void perf_clear_dirty_counters(void)
 			if (!test_bit(i - INTEL_PMC_IDX_FIXED, hybrid(cpuc->pmu, fixed_cntr_mask)))
 				continue;

-			wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0);
+			wrmsrl(x86_pmu_fixed_ctr_addr(i - INTEL_PMC_IDX_FIXED), 0);
 		} else {
 			wrmsrl(x86_pmu_event_addr(i), 0);
 		}

arch/x86/events/intel/core.c

Lines changed: 16 additions & 1 deletion
@@ -2952,7 +2952,7 @@ static void intel_pmu_reset(void)
 	for_each_set_bit(idx, fixed_cntr_mask, INTEL_PMC_MAX_FIXED) {
 		if (fixed_counter_disabled(idx, cpuc->pmu))
 			continue;
-		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
+		wrmsrl_safe(x86_pmu_fixed_ctr_addr(idx), 0ull);
 	}

 	if (ds)
@@ -5187,6 +5187,7 @@ static __initconst const struct x86_pmu core_pmu = {
 	.schedule_events	= x86_schedule_events,
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
+	.fixedctr		= MSR_ARCH_PERFMON_FIXED_CTR0,
 	.event_map		= intel_pmu_event_map,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
 	.apic			= 1,
@@ -5240,6 +5241,7 @@ static __initconst const struct x86_pmu intel_pmu = {
 	.schedule_events	= x86_schedule_events,
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
+	.fixedctr		= MSR_ARCH_PERFMON_FIXED_CTR0,
 	.event_map		= intel_pmu_event_map,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
 	.apic			= 1,
@@ -6182,6 +6184,11 @@ static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
 	}
 }

+static inline int intel_pmu_v6_addr_offset(int index, bool eventsel)
+{
+	return MSR_IA32_PMC_V6_STEP * index;
+}
+
 static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = {
 	{ hybrid_small, "cpu_atom" },
 	{ hybrid_big, "cpu_core" },
@@ -7156,6 +7163,14 @@ __init int intel_pmu_init(void)
 		pr_cont("full-width counters, ");
 	}

+	/* Support V6+ MSR Aliasing */
+	if (x86_pmu.version >= 6) {
+		x86_pmu.perfctr = MSR_IA32_PMC_V6_GP0_CTR;
+		x86_pmu.eventsel = MSR_IA32_PMC_V6_GP0_CFG_A;
+		x86_pmu.fixedctr = MSR_IA32_PMC_V6_FX0_CTR;
+		x86_pmu.addr_offset = intel_pmu_v6_addr_offset;
+	}
+
 	if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
 		x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;

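Note that no new helper is needed for the GP counters: the existing x86_pmu_event_addr() and x86_pmu_config_addr() helpers in perf_event.h (not part of this diff) already add x86_pmu.addr_offset(index, eventsel) to the perfctr/eventsel bases, so repointing the bases and installing the callback above is sufficient. A minimal sketch of the resulting arithmetic, with v6_pmu as a hypothetical stand-in for the relevant x86_pmu fields:

/* Illustrative mock only; v6_pmu is a hypothetical stand-in for x86_pmu. */
#include <assert.h>
#include <stdbool.h>

static int intel_pmu_v6_addr_offset(int index, bool eventsel)
{
	return 4 * index;	/* MSR_IA32_PMC_V6_STEP, regardless of eventsel */
}

static const struct {
	unsigned int eventsel;	/* base of the GP control MSRs */
	unsigned int perfctr;	/* base of the GP counter MSRs */
	int (*addr_offset)(int index, bool eventsel);
} v6_pmu = {
	.eventsel	= 0x1901,	/* MSR_IA32_PMC_V6_GP0_CFG_A */
	.perfctr	= 0x1900,	/* MSR_IA32_PMC_V6_GP0_CTR   */
	.addr_offset	= intel_pmu_v6_addr_offset,
};

int main(void)
{
	int idx = 3;

	/* Mirrors how the config/counter address helpers apply the callback. */
	assert(v6_pmu.eventsel + v6_pmu.addr_offset(idx, true) == 0x190d);
	assert(v6_pmu.perfctr + v6_pmu.addr_offset(idx, false) == 0x190c);
	return 0;
}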
arch/x86/events/perf_event.h

Lines changed: 7 additions & 0 deletions
@@ -787,6 +787,7 @@ struct x86_pmu {
 	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
 	unsigned	eventsel;
 	unsigned	perfctr;
+	unsigned	fixedctr;
 	int		(*addr_offset)(int index, bool eventsel);
 	int		(*rdpmc_index)(int index);
 	u64		(*event_map)(int);
@@ -1144,6 +1145,12 @@ static inline unsigned int x86_pmu_event_addr(int index)
 		  x86_pmu.addr_offset(index, false) : index);
 }

+static inline unsigned int x86_pmu_fixed_ctr_addr(int index)
+{
+	return x86_pmu.fixedctr + (x86_pmu.addr_offset ?
+				   x86_pmu.addr_offset(index, false) : index);
+}
+
 static inline int x86_pmu_rdpmc_index(int index)
 {
 	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;

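To see what the new x86_pmu_fixed_ctr_addr() helper resolves to on each layout, here is a standalone sketch (pmu_mock below is a hypothetical stand-in, not the kernel's struct x86_pmu): on a pre-V6 PMU the fixed counters remain contiguous from MSR_ARCH_PERFMON_FIXED_CTR0 (0x309), while with the V6 setup from intel_pmu_init() above they stride by 4 from 0x1980.

/* Standalone sketch; pmu_mock is a hypothetical stand-in for struct x86_pmu. */
#include <assert.h>
#include <stdbool.h>

struct pmu_mock {
	unsigned int fixedctr;				/* address of fixed counter 0 */
	int (*addr_offset)(int index, bool eventsel);	/* optional per-index offset  */
};

static int v6_addr_offset(int index, bool eventsel)
{
	return 4 * index;				/* MSR_IA32_PMC_V6_STEP */
}

static unsigned int fixed_ctr_addr(const struct pmu_mock *pmu, int index)
{
	/* Same shape as x86_pmu_fixed_ctr_addr() in the hunk above. */
	return pmu->fixedctr + (pmu->addr_offset ?
				pmu->addr_offset(index, false) : index);
}

int main(void)
{
	struct pmu_mock legacy = { .fixedctr = 0x309 };	/* MSR_ARCH_PERFMON_FIXED_CTR0 */
	struct pmu_mock v6 = { .fixedctr = 0x1980, .addr_offset = v6_addr_offset };

	assert(fixed_ctr_addr(&legacy, 2) == 0x30b);	/* 0x309 + 2    */
	assert(fixed_ctr_addr(&v6, 2) == 0x1988);	/* 0x1980 + 4*2 */
	return 0;
}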
arch/x86/include/asm/msr-index.h

Lines changed: 6 additions & 0 deletions
@@ -556,6 +556,12 @@
 #define MSR_RELOAD_PMC0			0x000014c1
 #define MSR_RELOAD_FIXED_CTR0		0x00001309

+/* V6 PMON MSR range */
+#define MSR_IA32_PMC_V6_GP0_CTR		0x1900
+#define MSR_IA32_PMC_V6_GP0_CFG_A	0x1901
+#define MSR_IA32_PMC_V6_FX0_CTR		0x1980
+#define MSR_IA32_PMC_V6_STEP		4
+
 /* KeyID partitioning between MKTME and TDX */
 #define MSR_IA32_MKTME_KEYID_PARTITIONING	0x00000087
