Skip to content

Commit f8df957

Browse files
committed
perf/x86/intel: Avoid disable PMU if !cpuc->enabled in sample read
JIRA: https://issues.redhat.com/browse/RHEL-47444

upstream
========
commit f9bdf1f
Author: Kan Liang <kan.liang@linux.intel.com>
Date:   Tue Jan 21 07:23:01 2025 -0800

description
===========
The WARN_ON(this_cpu_read(cpu_hw_events.enabled)) in the
intel_pmu_save_and_restart_reload() is triggered, when sampling read
topdown events.

In a NMI handler, the cpu_hw_events.enabled is set and used to indicate
the status of core PMU. The generic pmu->pmu_disable_count, updated in
the perf_pmu_disable/enable pair, is not touched. However, the
perf_pmu_disable/enable pair is invoked when sampling read in a NMI
handler. The cpuc->enabled is mistakenly set by the perf_pmu_enable().

Avoid disabling PMU if the core PMU is already disabled. Merge the logic
together.

Fixes: 7b2c05a ("perf/x86/intel: Generic support for hardware TopDown metrics")
Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20250121152303.3128733-2-kan.liang@linux.intel.com
Signed-off-by: Michael Petlan <mpetlan@redhat.com>
1 parent 326fbde commit f8df957

File tree

3 files changed

+25
-29
lines changed

3 files changed

+25
-29
lines changed

arch/x86/events/intel/core.c

Lines changed: 23 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -2774,28 +2774,33 @@ static u64 icl_update_topdown_event(struct perf_event *event)
27742774

27752775
DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update);
27762776

2777-
/*
 * Read/update an event on a sample read.
 *
 * For auto-reload (PEBS) and topdown events the hardware state must be
 * drained/updated with the core PMU quiesced, so the PMU is disabled
 * around the update.  The core-PMU state is driven directly through
 * cpuc->enabled and intel_pmu_disable_all()/intel_pmu_enable_all()
 * instead of the generic perf_pmu_disable()/perf_pmu_enable() pair:
 * when invoked from the NMI handler that pair would mistakenly set
 * cpuc->enabled and trip the
 * WARN_ON(this_cpu_read(cpu_hw_events.enabled)) in
 * intel_pmu_save_and_restart_reload() (see the commit message /
 * RHEL-47444).  The PMU is only actually disabled/re-enabled if it was
 * enabled on entry.
 */
static void intel_pmu_read_event(struct perf_event *event)
{
	if (event->hw.flags & (PERF_X86_EVENT_AUTO_RELOAD | PERF_X86_EVENT_TOPDOWN)) {
		struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
		bool pmu_enabled = cpuc->enabled;	/* saved core-PMU state */

		/* Only need to call update_topdown_event() once for group read. */
		if (is_metric_event(event) && (cpuc->txn_flags & PERF_PMU_TXN_READ))
			return;

		/* Avoid disabling the PMU if it is already disabled. */
		cpuc->enabled = 0;
		if (pmu_enabled)
			intel_pmu_disable_all();

		if (is_topdown_event(event))
			static_call(intel_pmu_update_topdown_event)(event);
		else
			intel_pmu_drain_pebs_buffer();	/* PERF_X86_EVENT_AUTO_RELOAD */

		/* Restore the saved state; re-enable only if it was enabled. */
		cpuc->enabled = pmu_enabled;
		if (pmu_enabled)
			intel_pmu_enable_all(0);

		return;
	}

	x86_perf_event_update(event);
}
28002805

28012806
static void intel_pmu_enable_fixed(struct perf_event *event)

arch/x86/events/intel/ds.c

Lines changed: 1 addition & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -952,7 +952,7 @@ int intel_pmu_drain_bts_buffer(void)
952952
return 1;
953953
}
954954

955-
static inline void intel_pmu_drain_pebs_buffer(void)
955+
void intel_pmu_drain_pebs_buffer(void)
956956
{
957957
struct perf_sample_data data;
958958

@@ -2099,15 +2099,6 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
20992099
return NULL;
21002100
}
21012101

2102-
void intel_pmu_auto_reload_read(struct perf_event *event)
2103-
{
2104-
WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));
2105-
2106-
perf_pmu_disable(event->pmu);
2107-
intel_pmu_drain_pebs_buffer();
2108-
perf_pmu_enable(event->pmu);
2109-
}
2110-
21112102
/*
21122103
* Special variant of intel_pmu_save_and_restart() for auto-reload.
21132104
*/

arch/x86/events/perf_event.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1642,7 +1642,7 @@ void intel_pmu_pebs_disable_all(void);
16421642

16431643
void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
16441644

1645-
void intel_pmu_auto_reload_read(struct perf_event *event);
1645+
void intel_pmu_drain_pebs_buffer(void);
16461646

16471647
void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
16481648

0 commit comments

Comments
 (0)