@@ -2149,8 +2149,9 @@ perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event)
21492149}
21502150
/* Forward declarations: definitions appear later in this file. */
static void put_event(struct perf_event *event);
/*
 * NOTE(review): this commit replaces the forward declaration of
 * event_sched_out() with __event_disable(), which bundles sched-out,
 * cgroup-disable accounting and the state transition. Previous form:
 *
 *   static void event_sched_out(struct perf_event *event,
 *                               struct perf_event_context *ctx);
 */
static void __event_disable(struct perf_event *event,
			    struct perf_event_context *ctx,
			    enum perf_event_state state);
21542155
21552156static void perf_put_aux_event (struct perf_event * event )
21562157{
@@ -2183,8 +2184,7 @@ static void perf_put_aux_event(struct perf_event *event)
21832184 * state so that we don't try to schedule it again. Note
21842185 * that perf_event_enable() will clear the ERROR status.
21852186 */
2186- event_sched_out (iter , ctx );
2187- perf_event_set_state (event , PERF_EVENT_STATE_ERROR );
2187+ __event_disable (iter , ctx , PERF_EVENT_STATE_ERROR );
21882188 }
21892189}
21902190
@@ -2242,18 +2242,6 @@ static inline struct list_head *get_event_list(struct perf_event *event)
22422242 & event -> pmu_ctx -> flexible_active ;
22432243}
22442244
/*
 * Events that have PERF_EV_CAP_SIBLING require being part of a group and
 * cannot exist on their own, schedule them out and move them into the ERROR
 * state. Also see _perf_event_enable(), it will not be able to recover
 * this ERROR state.
 *
 * NOTE(review): this commit deletes this helper; its one caller in
 * perf_group_detach() now calls __event_disable(sibling, ctx,
 * PERF_EVENT_STATE_ERROR) directly, which additionally performs the
 * perf_cgroup_event_disable() accounting that this helper omitted.
 */
static inline void perf_remove_sibling_event(struct perf_event *event)
{
	event_sched_out(event, event->ctx);
	perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
}
22572245static void perf_group_detach (struct perf_event * event )
22582246{
22592247 struct perf_event * leader = event -> group_leader ;
@@ -2289,8 +2277,15 @@ static void perf_group_detach(struct perf_event *event)
22892277 */
22902278 list_for_each_entry_safe (sibling , tmp , & event -> sibling_list , sibling_list ) {
22912279
2280+ /*
2281+ * Events that have PERF_EV_CAP_SIBLING require being part of
2282+ * a group and cannot exist on their own, schedule them out
2283+ * and move them into the ERROR state. Also see
2284+ * _perf_event_enable(), it will not be able to recover this
2285+ * ERROR state.
2286+ */
22922287 if (sibling -> event_caps & PERF_EV_CAP_SIBLING )
2293- perf_remove_sibling_event (sibling );
2288+ __event_disable (sibling , ctx , PERF_EVENT_STATE_ERROR );
22942289
22952290 sibling -> group_leader = sibling ;
22962291 list_del_init (& sibling -> sibling_list );
@@ -2562,6 +2557,15 @@ static void perf_remove_from_context(struct perf_event *event, unsigned long fla
25622557 event_function_call (event , __perf_remove_from_context , (void * )flags );
25632558}
25642559
/*
 * Fully disable @event: schedule it out of @ctx, drop its cgroup
 * accounting, and move it to @state. Visible callers pass either
 * PERF_EVENT_STATE_OFF (normal disable) or PERF_EVENT_STATE_ERROR
 * (PERF_EV_CAP_SIBLING / AUX teardown, which _perf_event_enable()
 * cannot recover from).
 *
 * Ordering matters: the event must be scheduled out before its state
 * is rewritten, matching the sequence the call sites used previously.
 */
static void __event_disable(struct perf_event *event,
			    struct perf_event_context *ctx,
			    enum perf_event_state state)
{
	event_sched_out(event, ctx);
	perf_cgroup_event_disable(event, ctx);
	perf_event_set_state(event, state);
}
2568+
25652569/*
25662570 * Cross CPU call to disable a performance event
25672571 */
@@ -2576,13 +2580,18 @@ static void __perf_event_disable(struct perf_event *event,
25762580 perf_pmu_disable (event -> pmu_ctx -> pmu );
25772581 ctx_time_update_event (ctx , event );
25782582
2583+ /*
2584+ * When disabling a group leader, the whole group becomes ineligible
2585+ * to run, so schedule out the full group.
2586+ */
25792587 if (event == event -> group_leader )
25802588 group_sched_out (event , ctx );
2581- else
2582- event_sched_out (event , ctx );
25832589
2584- perf_event_set_state (event , PERF_EVENT_STATE_OFF );
2585- perf_cgroup_event_disable (event , ctx );
2590+ /*
2591+ * But only mark the leader OFF; the siblings will remain
2592+ * INACTIVE.
2593+ */
2594+ __event_disable (event , ctx , PERF_EVENT_STATE_OFF );
25862595
25872596 perf_pmu_enable (event -> pmu_ctx -> pmu );
25882597}
0 commit comments