
Commit 7ef5aa0

Peter Zijlstra authored and gregkh committed
perf/core: Simplify the perf_event_alloc() error path
[ Upstream commit c70ca29 ]

The error cleanup sequence in perf_event_alloc() is a subset of the
existing _free_event() function (it must of course be).

Split this out into __free_event() and simplify the error path.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lore.kernel.org/r/20241104135517.967889521@infradead.org
Stable-dep-of: 56799bc ("perf: Fix hang while freeing sigtrap event")
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent c61feda commit 7ef5aa0
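The refactoring pattern is worth spelling out: each setup step in perf_event_alloc() now records an attach-state flag once its resource is live, and a single teardown routine, __free_event(), undoes exactly the steps whose flags are set. Every error site can then jump to one label instead of threading through a carefully ordered chain of err_* targets. A minimal userspace sketch of that shape follows; the struct object, ATTACH_* bits, and obj_* helpers are hypothetical, illustrative only, not kernel code:

    #include <stdlib.h>

    /* Illustrative state bits, analogous to the PERF_ATTACH_* flags. */
    #define ATTACH_BUF   0x01
    #define ATTACH_TABLE 0x02

    struct object {
            unsigned int attach_state;
            void *buf;
            void *table;
    };

    /* Single teardown: releases only what the state flags say was set up. */
    static void obj_free(struct object *obj)
    {
            if (obj->attach_state & ATTACH_TABLE)
                    free(obj->table);
            if (obj->attach_state & ATTACH_BUF)
                    free(obj->buf);
            free(obj);
    }

    /* Construction: every failure takes the same path through obj_free(). */
    static struct object *obj_alloc(void)
    {
            struct object *obj = calloc(1, sizeof(*obj));

            if (!obj)
                    return NULL;

            obj->buf = malloc(64);
            if (!obj->buf)
                    goto err;
            obj->attach_state |= ATTACH_BUF;

            obj->table = malloc(128);
            if (!obj->table)
                    goto err;
            obj->attach_state |= ATTACH_TABLE;

            return obj;
    err:
            obj_free(obj);  /* safe at any point of partial construction */
            return NULL;
    }

Because a flag is set only after its resource exists, the teardown is safe from any point of partial construction; that is what lets the diff below collapse err_callchain_buffer, err_addr_filters, err_per_task, err_pmu and err_ns into a single err: label.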

File tree

2 files changed: +78 -76 lines changed


include/linux/perf_event.h

Lines changed: 9 additions & 7 deletions

@@ -673,13 +673,15 @@ struct swevent_hlist {
 	struct rcu_head rcu_head;
 };

-#define PERF_ATTACH_CONTEXT	0x01
-#define PERF_ATTACH_GROUP	0x02
-#define PERF_ATTACH_TASK	0x04
-#define PERF_ATTACH_TASK_DATA	0x08
-#define PERF_ATTACH_ITRACE	0x10
-#define PERF_ATTACH_SCHED_CB	0x20
-#define PERF_ATTACH_CHILD	0x40
+#define PERF_ATTACH_CONTEXT	0x0001
+#define PERF_ATTACH_GROUP	0x0002
+#define PERF_ATTACH_TASK	0x0004
+#define PERF_ATTACH_TASK_DATA	0x0008
+#define PERF_ATTACH_ITRACE	0x0010
+#define PERF_ATTACH_SCHED_CB	0x0020
+#define PERF_ATTACH_CHILD	0x0040
+#define PERF_ATTACH_EXCLUSIVE	0x0080
+#define PERF_ATTACH_CALLCHAIN	0x0100

 struct bpf_prog;
 struct perf_cgroup;

kernel/events/core.c

Lines changed: 69 additions & 69 deletions

@@ -5262,21 +5262,22 @@ static int exclusive_event_init(struct perf_event *event)
 			return -EBUSY;
 	}

+	event->attach_state |= PERF_ATTACH_EXCLUSIVE;
+
 	return 0;
 }

 static void exclusive_event_destroy(struct perf_event *event)
 {
 	struct pmu *pmu = event->pmu;

-	if (!is_exclusive_pmu(pmu))
-		return;
-
 	/* see comment in exclusive_event_init() */
 	if (event->attach_state & PERF_ATTACH_TASK)
 		atomic_dec(&pmu->exclusive_cnt);
 	else
 		atomic_inc(&pmu->exclusive_cnt);
+
+	event->attach_state &= ~PERF_ATTACH_EXCLUSIVE;
 }

 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)

@@ -5335,40 +5336,20 @@ static void perf_pending_task_sync(struct perf_event *event)
 	rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE);
 }

-static void _free_event(struct perf_event *event)
+/* vs perf_event_alloc() error */
+static void __free_event(struct perf_event *event)
 {
-	irq_work_sync(&event->pending_irq);
-	irq_work_sync(&event->pending_disable_irq);
-	perf_pending_task_sync(event);
+	if (event->attach_state & PERF_ATTACH_CALLCHAIN)
+		put_callchain_buffers();

-	unaccount_event(event);
+	kfree(event->addr_filter_ranges);

-	security_perf_event_free(event);
-
-	if (event->rb) {
-		/*
-		 * Can happen when we close an event with re-directed output.
-		 *
-		 * Since we have a 0 refcount, perf_mmap_close() will skip
-		 * over us; possibly making our ring_buffer_put() the last.
-		 */
-		mutex_lock(&event->mmap_mutex);
-		ring_buffer_attach(event, NULL);
-		mutex_unlock(&event->mmap_mutex);
-	}
+	if (event->attach_state & PERF_ATTACH_EXCLUSIVE)
+		exclusive_event_destroy(event);

 	if (is_cgroup_event(event))
 		perf_detach_cgroup(event);

-	if (!event->parent) {
-		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
-			put_callchain_buffers();
-	}
-
-	perf_event_free_bpf_prog(event);
-	perf_addr_filters_splice(event, NULL);
-	kfree(event->addr_filter_ranges);
-
 	if (event->destroy)
 		event->destroy(event);

@@ -5379,22 +5360,58 @@ static void _free_event(struct perf_event *event)
 	if (event->hw.target)
 		put_task_struct(event->hw.target);

-	if (event->pmu_ctx)
+	if (event->pmu_ctx) {
+		/*
+		 * put_pmu_ctx() needs an event->ctx reference, because of
+		 * epc->ctx.
+		 */
+		WARN_ON_ONCE(!event->ctx);
+		WARN_ON_ONCE(event->pmu_ctx->ctx != event->ctx);
 		put_pmu_ctx(event->pmu_ctx);
+	}

 	/*
-	 * perf_event_free_task() relies on put_ctx() being 'last', in particular
-	 * all task references must be cleaned up.
+	 * perf_event_free_task() relies on put_ctx() being 'last', in
+	 * particular all task references must be cleaned up.
 	 */
 	if (event->ctx)
 		put_ctx(event->ctx);

-	exclusive_event_destroy(event);
-	module_put(event->pmu->module);
+	if (event->pmu)
+		module_put(event->pmu->module);

 	call_rcu(&event->rcu_head, free_event_rcu);
 }

+/* vs perf_event_alloc() success */
+static void _free_event(struct perf_event *event)
+{
+	irq_work_sync(&event->pending_irq);
+	irq_work_sync(&event->pending_disable_irq);
+	perf_pending_task_sync(event);
+
+	unaccount_event(event);
+
+	security_perf_event_free(event);
+
+	if (event->rb) {
+		/*
+		 * Can happen when we close an event with re-directed output.
+		 *
+		 * Since we have a 0 refcount, perf_mmap_close() will skip
+		 * over us; possibly making our ring_buffer_put() the last.
+		 */
+		mutex_lock(&event->mmap_mutex);
+		ring_buffer_attach(event, NULL);
+		mutex_unlock(&event->mmap_mutex);
+	}
+
+	perf_event_free_bpf_prog(event);
+	perf_addr_filters_splice(event, NULL);
+
+	__free_event(event);
+}
+
 /*
  * Used to free events which have a known refcount of 1, such as in error paths
  * where the event isn't exposed yet and inherited events.

@@ -12014,8 +12031,10 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
 			event->destroy(event);
 	}

-	if (ret)
+	if (ret) {
+		event->pmu = NULL;
 		module_put(pmu->module);
+	}

 	return ret;
 }

@@ -12343,15 +12362,15 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	 * See perf_output_read().
 	 */
 	if (has_inherit_and_sample_read(attr) && !(attr->sample_type & PERF_SAMPLE_TID))
-		goto err_ns;
+		goto err;

 	if (!has_branch_stack(event))
 		event->attr.branch_sample_type = 0;

 	pmu = perf_init_event(event);
 	if (IS_ERR(pmu)) {
 		err = PTR_ERR(pmu);
-		goto err_ns;
+		goto err;
 	}

 	/*

@@ -12361,46 +12380,46 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	 */
 	if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1)) {
 		err = -EINVAL;
-		goto err_pmu;
+		goto err;
 	}

 	if (event->attr.aux_output &&
 	    (!(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT) ||
 	     event->attr.aux_pause || event->attr.aux_resume)) {
 		err = -EOPNOTSUPP;
-		goto err_pmu;
+		goto err;
 	}

 	if (event->attr.aux_pause && event->attr.aux_resume) {
 		err = -EINVAL;
-		goto err_pmu;
+		goto err;
 	}

 	if (event->attr.aux_start_paused) {
 		if (!(pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE)) {
 			err = -EOPNOTSUPP;
-			goto err_pmu;
+			goto err;
 		}
 		event->hw.aux_paused = 1;
 	}

 	if (cgroup_fd != -1) {
 		err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
 		if (err)
-			goto err_pmu;
+			goto err;
 	}

 	err = exclusive_event_init(event);
 	if (err)
-		goto err_pmu;
+		goto err;

 	if (has_addr_filter(event)) {
 		event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
 						    sizeof(struct perf_addr_filter_range),
 						    GFP_KERNEL);
 		if (!event->addr_filter_ranges) {
 			err = -ENOMEM;
-			goto err_per_task;
+			goto err;
 		}

 		/*

@@ -12425,41 +12444,22 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
 			err = get_callchain_buffers(attr->sample_max_stack);
 			if (err)
-				goto err_addr_filters;
+				goto err;
+			event->attach_state |= PERF_ATTACH_CALLCHAIN;
 		}
 	}

 	err = security_perf_event_alloc(event);
 	if (err)
-		goto err_callchain_buffer;
+		goto err;

 	/* symmetric to unaccount_event() in _free_event() */
 	account_event(event);

 	return event;

-err_callchain_buffer:
-	if (!event->parent) {
-		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
-			put_callchain_buffers();
-	}
-err_addr_filters:
-	kfree(event->addr_filter_ranges);
-
-err_per_task:
-	exclusive_event_destroy(event);
-
-err_pmu:
-	if (is_cgroup_event(event))
-		perf_detach_cgroup(event);
-	if (event->destroy)
-		event->destroy(event);
-	module_put(pmu->module);
-err_ns:
-	if (event->hw.target)
-		put_task_struct(event->hw.target);
-	call_rcu(&event->rcu_head, free_event_rcu);
-
+err:
+	__free_event(event);
 	return ERR_PTR(err);
 }
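One subtlety in the perf_try_init_event() hunk above: on failure it now clears event->pmu before dropping the module reference. Since __free_event() calls module_put() whenever event->pmu is non-NULL, a failed init must hide the pmu from the shared error path, or the module reference would be dropped twice. A minimal userspace analogue of that double-release guard, with hypothetical names, not kernel code:

    #include <stdio.h>
    #include <stdlib.h>

    struct event {
            FILE *log;      /* stands in for the event->pmu module reference */
    };

    /*
     * If setup fails after the resource was acquired, release it here and
     * clear the handle, mirroring 'event->pmu = NULL', so the shared
     * teardown below cannot release it a second time.
     */
    static int try_init(struct event *ev, const char *path)
    {
            ev->log = fopen(path, "w");
            if (!ev->log)
                    return -1;

            if (fputs("header\n", ev->log) == EOF) {
                    fclose(ev->log);        /* this path releases it itself... */
                    ev->log = NULL;         /* ...so hide it from event_free() */
                    return -1;
            }
            return 0;
    }

    /* Shared teardown; the NULL check mirrors 'if (event->pmu)' in __free_event(). */
    static void event_free(struct event *ev)
    {
            if (ev->log)
                    fclose(ev->log);
            free(ev);
    }

Without the NULL assignment, an init failure followed by event_free() would fclose() the same stream twice; in the kernel case, the analogous bug would be a double module_put().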
