Skip to content

Commit e2ee150

Browse files
committed
perf/x86/intel/ds: Factor out functions for PEBS records processing
JIRA: https://issues.redhat.com/browse/RHEL-47444 upstream ======== commit 3c00ed3 Author: Kan Liang <kan.liang@linux.intel.com> Date: Tue Nov 19 05:55:03 2024 -0800 description =========== Factor out functions to process normal and the last PEBS records, which can be shared with the later patch. Move the event-updating related code (intel_pmu_save_and_restart()) to the end, where all samples have been processed. For the current usage, it doesn't matter when perf updates event counts and resets the counter, because all counters are stopped when the PEBS buffer is drained. Drop the return of the !intel_pmu_save_and_restart(event) check, because it never happens. The intel_pmu_save_and_restart(event) only returns 0 when !hwc->event_base or the period_left > 0. - The !hwc->event_base is impossible for the PEBS event, since the PEBS event is only available on GP and fixed counters, which always have a valid hwc->event_base. - The check only happens for the case of non-AUTO_RELOAD and single PEBS, which implies that the event must have overflowed. The period_left must always be <= 0 for an overflowed event after x86_pmu_update(). Co-developed-by: "Peter Zijlstra (Intel)" <peterz@infradead.org> Signed-off-by: "Peter Zijlstra (Intel)" <peterz@infradead.org> Signed-off-by: Kan Liang <kan.liang@linux.intel.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lore.kernel.org/r/20241119135504.1463839-4-kan.liang@linux.intel.com Signed-off-by: Michael Petlan <mpetlan@redhat.com>
1 parent 1cbdb84 commit e2ee150

File tree

1 file changed

+67
-42
lines changed
  • arch/x86/events/intel

1 file changed

+67
-42
lines changed

arch/x86/events/intel/ds.c

Lines changed: 67 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -2157,46 +2157,33 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
21572157
return 0;
21582158
}
21592159

2160+
typedef void (*setup_fn)(struct perf_event *, struct pt_regs *, void *,
2161+
struct perf_sample_data *, struct pt_regs *);
2162+
2163+
static struct pt_regs dummy_iregs;
2164+
21602165
static __always_inline void
21612166
__intel_pmu_pebs_event(struct perf_event *event,
21622167
struct pt_regs *iregs,
2168+
struct pt_regs *regs,
21632169
struct perf_sample_data *data,
2164-
void *base, void *top,
2165-
int bit, int count,
2166-
void (*setup_sample)(struct perf_event *,
2167-
struct pt_regs *,
2168-
void *,
2169-
struct perf_sample_data *,
2170-
struct pt_regs *))
2170+
void *at,
2171+
setup_fn setup_sample)
21712172
{
2172-
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2173-
struct hw_perf_event *hwc = &event->hw;
2174-
struct x86_perf_regs perf_regs;
2175-
struct pt_regs *regs = &perf_regs.regs;
2176-
void *at = get_next_pebs_record_by_bit(base, top, bit);
2177-
static struct pt_regs dummy_iregs;
2178-
2179-
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
2180-
/*
2181-
* Now, auto-reload is only enabled in fixed period mode.
2182-
* The reload value is always hwc->sample_period.
2183-
* May need to change it, if auto-reload is enabled in
2184-
* freq mode later.
2185-
*/
2186-
intel_pmu_save_and_restart_reload(event, count);
2187-
} else if (!intel_pmu_save_and_restart(event))
2188-
return;
2189-
2190-
if (!iregs)
2191-
iregs = &dummy_iregs;
2173+
setup_sample(event, iregs, at, data, regs);
2174+
perf_event_output(event, data, regs);
2175+
}
21922176

2193-
while (count > 1) {
2194-
setup_sample(event, iregs, at, data, regs);
2195-
perf_event_output(event, data, regs);
2196-
at += cpuc->pebs_record_size;
2197-
at = get_next_pebs_record_by_bit(at, top, bit);
2198-
count--;
2199-
}
2177+
static __always_inline void
2178+
__intel_pmu_pebs_last_event(struct perf_event *event,
2179+
struct pt_regs *iregs,
2180+
struct pt_regs *regs,
2181+
struct perf_sample_data *data,
2182+
void *at,
2183+
int count,
2184+
setup_fn setup_sample)
2185+
{
2186+
struct hw_perf_event *hwc = &event->hw;
22002187

22012188
setup_sample(event, iregs, at, data, regs);
22022189
if (iregs == &dummy_iregs) {
@@ -2215,6 +2202,44 @@ __intel_pmu_pebs_event(struct perf_event *event,
22152202
if (perf_event_overflow(event, data, regs))
22162203
x86_pmu_stop(event, 0);
22172204
}
2205+
2206+
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
2207+
/*
2208+
* Now, auto-reload is only enabled in fixed period mode.
2209+
* The reload value is always hwc->sample_period.
2210+
* May need to change it, if auto-reload is enabled in
2211+
* freq mode later.
2212+
*/
2213+
intel_pmu_save_and_restart_reload(event, count);
2214+
} else
2215+
intel_pmu_save_and_restart(event);
2216+
}
2217+
2218+
static __always_inline void
2219+
__intel_pmu_pebs_events(struct perf_event *event,
2220+
struct pt_regs *iregs,
2221+
struct perf_sample_data *data,
2222+
void *base, void *top,
2223+
int bit, int count,
2224+
setup_fn setup_sample)
2225+
{
2226+
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2227+
struct x86_perf_regs perf_regs;
2228+
struct pt_regs *regs = &perf_regs.regs;
2229+
void *at = get_next_pebs_record_by_bit(base, top, bit);
2230+
int cnt = count;
2231+
2232+
if (!iregs)
2233+
iregs = &dummy_iregs;
2234+
2235+
while (cnt > 1) {
2236+
__intel_pmu_pebs_event(event, iregs, regs, data, at, setup_sample);
2237+
at += cpuc->pebs_record_size;
2238+
at = get_next_pebs_record_by_bit(at, top, bit);
2239+
cnt--;
2240+
}
2241+
2242+
__intel_pmu_pebs_last_event(event, iregs, regs, data, at, count, setup_sample);
22182243
}
22192244

22202245
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_data *data)
@@ -2251,8 +2276,8 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_
22512276
return;
22522277
}
22532278

2254-
__intel_pmu_pebs_event(event, iregs, data, at, top, 0, n,
2255-
setup_pebs_fixed_sample_data);
2279+
__intel_pmu_pebs_events(event, iregs, data, at, top, 0, n,
2280+
setup_pebs_fixed_sample_data);
22562281
}
22572282

22582283
static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
@@ -2383,9 +2408,9 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
23832408
}
23842409

23852410
if (counts[bit]) {
2386-
__intel_pmu_pebs_event(event, iregs, data, base,
2387-
top, bit, counts[bit],
2388-
setup_pebs_fixed_sample_data);
2411+
__intel_pmu_pebs_events(event, iregs, data, base,
2412+
top, bit, counts[bit],
2413+
setup_pebs_fixed_sample_data);
23892414
}
23902415
}
23912416
}
@@ -2437,9 +2462,9 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
24372462
if (WARN_ON_ONCE(!event->attr.precise_ip))
24382463
continue;
24392464

2440-
__intel_pmu_pebs_event(event, iregs, data, base,
2441-
top, bit, counts[bit],
2442-
setup_pebs_adaptive_sample_data);
2465+
__intel_pmu_pebs_events(event, iregs, data, base,
2466+
top, bit, counts[bit],
2467+
setup_pebs_adaptive_sample_data);
24432468
}
24442469
}
24452470

0 commit comments

Comments
 (0)