Skip to content

Commit 80ab016

Browse files
committed
Merge: perf: Sync with upstream v6.15
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-10/-/merge_requests/989 JIRA: https://issues.redhat.com/browse/RHEL-78197 Sync up the perf codebase with upstream v6.15. This makes the maintenance easier and also brings a lot of fixes and partner requests in. Signed-off-by: Michael Petlan <mpetlan@redhat.com> Approved-by: Rafael Aquini <raquini@redhat.com> Approved-by: David Arcari <darcari@redhat.com> Approved-by: ashelat <ashelat@redhat.com> Approved-by: Jarod Wilson <jarod@redhat.com> Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> Merged-by: Jan Stancek <jstancek@redhat.com>
2 parents 9ec2304 + e54155d commit 80ab016

File tree

580 files changed

+41365
-10737
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

580 files changed

+41365
-10737
lines changed

arch/powerpc/perf/core-book3s.c

Lines changed: 26 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -132,7 +132,10 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
132132

133133
static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
134134
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
135-
static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) {}
135+
static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
136+
struct task_struct *task, bool sched_in)
137+
{
138+
}
136139
static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {}
137140
static void pmao_restore_workaround(bool ebb) { }
138141
#endif /* CONFIG_PPC32 */
@@ -444,7 +447,8 @@ static void power_pmu_bhrb_disable(struct perf_event *event)
444447
/* Called from ctxsw to prevent one process's branch entries to
445448
* mingle with the other process's entries during context switch.
446449
*/
447-
static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
450+
static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
451+
struct task_struct *task, bool sched_in)
448452
{
449453
if (!ppmu->bhrb_nr)
450454
return;
@@ -2222,6 +2226,10 @@ static struct pmu power_pmu = {
22222226
#define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \
22232227
PERF_SAMPLE_PHYS_ADDR | \
22242228
PERF_SAMPLE_DATA_PAGE_SIZE)
2229+
2230+
#define SIER_TYPE_SHIFT 15
2231+
#define SIER_TYPE_MASK (0x7ull << SIER_TYPE_SHIFT)
2232+
22252233
/*
22262234
* A counter has overflowed; update its count and record
22272235
* things if requested. Note that interrupts are hard-disabled
@@ -2290,6 +2298,22 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
22902298
is_kernel_addr(mfspr(SPRN_SIAR)))
22912299
record = 0;
22922300

2301+
/*
2302+
* SIER[46-48] presents instruction type of the sampled instruction.
2303+
* In ISA v3.0 and before values "0" and "7" are considered reserved.
2304+
* In ISA v3.1, value "7" has been used to indicate "larx/stcx".
2305+
* Drop the sample if "type" has reserved values for this field with a
2306+
* ISA version check.
2307+
*/
2308+
if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC &&
2309+
ppmu->get_mem_data_src) {
2310+
val = (regs->dar & SIER_TYPE_MASK) >> SIER_TYPE_SHIFT;
2311+
if (val == 0 || (val == 7 && !cpu_has_feature(CPU_FTR_ARCH_31))) {
2312+
record = 0;
2313+
atomic64_inc(&event->lost_samples);
2314+
}
2315+
}
2316+
22932317
/*
22942318
* Finally record data if requested.
22952319
*/

arch/powerpc/perf/isa207-common.c

Lines changed: 15 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -319,10 +319,18 @@ void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
319319
return;
320320
}
321321

322-
sier = mfspr(SPRN_SIER);
322+
/*
323+
* Use regs-dar for SPRN_SIER which is saved
324+
* during perf_read_regs at the beginning
325+
* of the PMU interrupt handler to avoid multiple
326+
* reads of SPRN_SIER
327+
*/
328+
sier = regs->dar;
323329
val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
324-
if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31)))
330+
if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31))) {
331+
dsrc->val = 0;
325332
return;
333+
}
326334

327335
idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT;
328336
sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT;
@@ -338,8 +346,12 @@ void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
338346
* to determine the exact instruction type. If the sampling
339347
* criteria is neither load or store, set the type as default
340348
* to NA.
349+
*
350+
* Use regs->dsisr for MMCRA which is saved during perf_read_regs
351+
* at the beginning of the PMU interrupt handler to avoid
352+
* multiple reads of SPRN_MMCRA
341353
*/
342-
mmcra = mfspr(SPRN_MMCRA);
354+
mmcra = regs->dsisr;
343355

344356
op_type = (mmcra >> MMCRA_SAMP_ELIG_SHIFT) & MMCRA_SAMP_ELIG_MASK;
345357
switch (op_type) {

arch/s390/kernel/perf_cpum_cf.c

Lines changed: 2 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -835,7 +835,7 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
835835
return validate_ctr_version(hwc->config, set);
836836
}
837837

838-
/* Events CPU_CYLCES and INSTRUCTIONS can be submitted with two different
838+
/* Events CPU_CYCLES and INSTRUCTIONS can be submitted with two different
839839
* attribute::type values:
840840
* - PERF_TYPE_HARDWARE:
841841
* - pmu->type:
@@ -858,18 +858,13 @@ static int cpumf_pmu_event_type(struct perf_event *event)
858858
static int cpumf_pmu_event_init(struct perf_event *event)
859859
{
860860
unsigned int type = event->attr.type;
861-
int err;
861+
int err = -ENOENT;
862862

863863
if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW)
864864
err = __hw_perf_event_init(event, type);
865865
else if (event->pmu->type == type)
866866
/* Registered as unknown PMU */
867867
err = __hw_perf_event_init(event, cpumf_pmu_event_type(event));
868-
else
869-
return -ENOENT;
870-
871-
if (unlikely(err) && event->destroy)
872-
event->destroy(event);
873868

874869
return err;
875870
}
@@ -1819,8 +1814,6 @@ static int cfdiag_event_init(struct perf_event *event)
18191814
event->destroy = hw_perf_event_destroy;
18201815

18211816
err = cfdiag_event_init2(event);
1822-
if (unlikely(err))
1823-
event->destroy(event);
18241817
out:
18251818
return err;
18261819
}

arch/s390/kernel/perf_cpum_sf.c

Lines changed: 42 additions & 59 deletions
Original file line numberDiff line numberDiff line change
@@ -180,39 +180,27 @@ static int sf_buffer_available(struct cpu_hw_sf *cpuhw)
180180
*/
181181
static void free_sampling_buffer(struct sf_buffer *sfb)
182182
{
183-
unsigned long *sdbt, *curr;
184-
185-
if (!sfb->sdbt)
186-
return;
183+
unsigned long *sdbt, *curr, *head;
187184

188185
sdbt = sfb->sdbt;
189-
curr = sdbt;
190-
186+
if (!sdbt)
187+
return;
188+
sfb->sdbt = NULL;
191189
/* Free the SDBT after all SDBs are processed... */
192-
while (1) {
193-
if (!*curr || !sdbt)
194-
break;
195-
196-
/* Process table-link entries */
190+
head = sdbt;
191+
curr = sdbt;
192+
do {
197193
if (is_link_entry(curr)) {
194+
/* Process table-link entries */
198195
curr = get_next_sdbt(curr);
199-
if (sdbt)
200-
free_page((unsigned long)sdbt);
201-
202-
/* If the origin is reached, sampling buffer is freed */
203-
if (curr == sfb->sdbt)
204-
break;
205-
else
206-
sdbt = curr;
196+
free_page((unsigned long)sdbt);
197+
sdbt = curr;
207198
} else {
208199
/* Process SDB pointer */
209-
if (*curr) {
210-
free_page((unsigned long)phys_to_virt(*curr));
211-
curr++;
212-
}
200+
free_page((unsigned long)phys_to_virt(*curr));
201+
curr++;
213202
}
214-
}
215-
203+
} while (curr != head);
216204
memset(sfb, 0, sizeof(*sfb));
217205
}
218206

@@ -404,7 +392,7 @@ static void sfb_init_allocs(unsigned long num, struct hw_perf_event *hwc)
404392

405393
static void deallocate_buffers(struct cpu_hw_sf *cpuhw)
406394
{
407-
if (cpuhw->sfb.sdbt)
395+
if (sf_buffer_available(cpuhw))
408396
free_sampling_buffer(&cpuhw->sfb);
409397
}
410398

@@ -559,16 +547,15 @@ static void setup_pmc_cpu(void *flags)
559547
{
560548
struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
561549

550+
sf_disable();
562551
switch (*((int *)flags)) {
563552
case PMC_INIT:
564553
memset(cpuhw, 0, sizeof(*cpuhw));
565554
qsi(&cpuhw->qsi);
566555
cpuhw->flags |= PMU_F_RESERVED;
567-
sf_disable();
568556
break;
569557
case PMC_RELEASE:
570558
cpuhw->flags &= ~PMU_F_RESERVED;
571-
sf_disable();
572559
deallocate_buffers(cpuhw);
573560
break;
574561
}
@@ -759,7 +746,6 @@ static int __hw_perf_event_init(struct perf_event *event)
759746
reserve_pmc_hardware();
760747
refcount_set(&num_events, 1);
761748
}
762-
mutex_unlock(&pmc_reserve_mutex);
763749
event->destroy = hw_perf_event_destroy;
764750

765751
/* Access per-CPU sampling information (query sampling info) */
@@ -818,7 +804,7 @@ static int __hw_perf_event_init(struct perf_event *event)
818804

819805
/* Use AUX buffer. No need to allocate it by ourself */
820806
if (attr->config == PERF_EVENT_CPUM_SF_DIAG)
821-
return 0;
807+
goto out;
822808

823809
/* Allocate the per-CPU sampling buffer using the CPU information
824810
* from the event. If the event is not pinned to a particular
@@ -848,6 +834,7 @@ static int __hw_perf_event_init(struct perf_event *event)
848834
if (is_default_overflow_handler(event))
849835
event->overflow_handler = cpumsf_output_event_pid;
850836
out:
837+
mutex_unlock(&pmc_reserve_mutex);
851838
return err;
852839
}
853840

@@ -898,9 +885,6 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
898885
event->attr.exclude_idle = 0;
899886

900887
err = __hw_perf_event_init(event);
901-
if (unlikely(err))
902-
if (event->destroy)
903-
event->destroy(event);
904888
return err;
905889
}
906890

@@ -910,10 +894,14 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
910894
struct hw_perf_event *hwc;
911895
int err;
912896

913-
if (cpuhw->flags & PMU_F_ENABLED)
914-
return;
915-
916-
if (cpuhw->flags & PMU_F_ERR_MASK)
897+
/*
898+
* Event must be
899+
* - added/started on this CPU (PMU_F_IN_USE set)
900+
* - and CPU must be available (PMU_F_RESERVED set)
901+
* - and not already enabled (PMU_F_ENABLED not set)
902+
* - and not in error condition (PMU_F_ERR_MASK not set)
903+
*/
904+
if (cpuhw->flags != (PMU_F_IN_USE | PMU_F_RESERVED))
917905
return;
918906

919907
/* Check whether to extent the sampling buffer.
@@ -927,33 +915,27 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
927915
* facility, but it can be fully re-enabled using sampling controls that
928916
* have been saved in cpumsf_pmu_disable().
929917
*/
930-
if (cpuhw->event) {
931-
hwc = &cpuhw->event->hw;
932-
if (!(SAMPL_DIAG_MODE(hwc))) {
933-
/*
934-
* Account number of overflow-designated
935-
* buffer extents
936-
*/
937-
sfb_account_overflows(cpuhw, hwc);
938-
extend_sampling_buffer(&cpuhw->sfb, hwc);
939-
}
940-
/* Rate may be adjusted with ioctl() */
941-
cpuhw->lsctl.interval = SAMPL_RATE(hwc);
918+
hwc = &cpuhw->event->hw;
919+
if (!(SAMPL_DIAG_MODE(hwc))) {
920+
/*
921+
* Account number of overflow-designated buffer extents
922+
*/
923+
sfb_account_overflows(cpuhw, hwc);
924+
extend_sampling_buffer(&cpuhw->sfb, hwc);
942925
}
926+
/* Rate may be adjusted with ioctl() */
927+
cpuhw->lsctl.interval = SAMPL_RATE(hwc);
943928

944929
/* (Re)enable the PMU and sampling facility */
945-
cpuhw->flags |= PMU_F_ENABLED;
946-
barrier();
947-
948930
err = lsctl(&cpuhw->lsctl);
949931
if (err) {
950-
cpuhw->flags &= ~PMU_F_ENABLED;
951932
pr_err("Loading sampling controls failed: op 1 err %i\n", err);
952933
return;
953934
}
954935

955936
/* Load current program parameter */
956937
lpp(&get_lowcore()->lpp);
938+
cpuhw->flags |= PMU_F_ENABLED;
957939
}
958940

959941
static void cpumsf_pmu_disable(struct pmu *pmu)
@@ -1774,7 +1756,9 @@ static void cpumsf_pmu_stop(struct perf_event *event, int flags)
17741756
event->hw.state |= PERF_HES_STOPPED;
17751757

17761758
if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
1777-
hw_perf_event_update(event, 1);
1759+
/* CPU hotplug off removes SDBs. No samples to extract. */
1760+
if (cpuhw->flags & PMU_F_RESERVED)
1761+
hw_perf_event_update(event, 1);
17781762
event->hw.state |= PERF_HES_UPTODATE;
17791763
}
17801764
perf_pmu_enable(event->pmu);
@@ -1789,7 +1773,7 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags)
17891773
if (cpuhw->flags & PMU_F_IN_USE)
17901774
return -EAGAIN;
17911775

1792-
if (!SAMPL_DIAG_MODE(&event->hw) && !cpuhw->sfb.sdbt)
1776+
if (!SAMPL_DIAG_MODE(&event->hw) && !sf_buffer_available(cpuhw))
17931777
return -EINVAL;
17941778

17951779
perf_pmu_disable(event->pmu);
@@ -1951,13 +1935,12 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
19511935

19521936
/* Program alert request */
19531937
if (alert & CPU_MF_INT_SF_PRA) {
1954-
if (cpuhw->flags & PMU_F_IN_USE)
1938+
if (cpuhw->flags & PMU_F_IN_USE) {
19551939
if (SAMPL_DIAG_MODE(&cpuhw->event->hw))
19561940
hw_collect_aux(cpuhw);
19571941
else
19581942
hw_perf_event_update(cpuhw->event, 0);
1959-
else
1960-
WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE));
1943+
}
19611944
}
19621945

19631946
/* Report measurement alerts only for non-PRA codes */
@@ -1978,7 +1961,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
19781961

19791962
/* Invalid sampling buffer entry */
19801963
if (alert & (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE)) {
1981-
pr_err("A sampling buffer entry is incorrect (alert=0x%x)\n",
1964+
pr_err("A sampling buffer entry is incorrect (alert=%#x)\n",
19821965
alert);
19831966
cpuhw->flags |= PMU_F_ERR_IBE;
19841967
sf_disable();

arch/s390/kernel/perf_pai_crypto.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -518,7 +518,8 @@ static void paicrypt_have_samples(void)
518518
/* Called on schedule-in and schedule-out. No access to event structure,
519519
* but for sampling only event CRYPTO_ALL is allowed.
520520
*/
521-
static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
521+
static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx,
522+
struct task_struct *task, bool sched_in)
522523
{
523524
/* We started with a clean page on event installation. So read out
524525
* results on schedule_out and if page was dirty, save old values.

arch/s390/kernel/perf_pai_ext.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -542,7 +542,8 @@ static void paiext_have_samples(void)
542542
/* Called on schedule-in and schedule-out. No access to event structure,
543543
* but for sampling only event NNPA_ALL is allowed.
544544
*/
545-
static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
545+
static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx,
546+
struct task_struct *task, bool sched_in)
546547
{
547548
/* We started with a clean page on event installation. So read out
548549
* results on schedule_out and if page was dirty, save old values.

arch/x86/events/amd/brs.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -381,7 +381,8 @@ static void amd_brs_poison_buffer(void)
381381
* On ctxswin, sched_in = true, called after the PMU has started
382382
* On ctxswout, sched_in = false, called before the PMU is stopped
383383
*/
384-
void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
384+
void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx,
385+
struct task_struct *task, bool sched_in)
385386
{
386387
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
387388

0 commit comments

Comments
 (0)